feat: initialize Kurdistan SDK - independent fork of Polkadot SDK

This commit is contained in:
2025-12-13 15:44:15 +03:00
commit 286de54384
6841 changed files with 1848356 additions and 0 deletions
@@ -0,0 +1,223 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
use pezkuwi_erasure_coding::systematic_recovery_threshold;
use pezkuwi_primitives::{node_features, ChunkIndex, CoreIndex, NodeFeatures, ValidatorIndex};
/// Compute the per-validator availability chunk index.
/// WARNING: THIS FUNCTION IS CRITICAL TO TEYRCHAIN CONSENSUS.
/// Any modification to the output of the function needs to be coordinated via the runtime.
/// It's best to use minimal/no external dependencies.
pub fn availability_chunk_index(
	node_features: &NodeFeatures,
	n_validators: usize,
	core_index: CoreIndex,
	validator_index: ValidatorIndex,
) -> Result<ChunkIndex, pezkuwi_erasure_coding::Error> {
	// Look up the chunk-mapping feature bit. A bit that is out of range of the
	// bitfield is treated as disabled.
	let mapping_enabled = node_features
		.get(usize::from(node_features::FeatureIndex::AvailabilityChunkMapping as u8))
		.map(|bitref| *bitref)
		.unwrap_or_default();

	if !mapping_enabled {
		// Feature disabled: the chunk index is just the validator index.
		return Ok(validator_index.into());
	}

	// Feature enabled: shift the assignment by a per-core offset so that distinct
	// cores hand their systematic chunks to distinct validators, wrapping modulo
	// the validator count.
	let systematic_threshold = systematic_recovery_threshold(n_validators)? as u32;
	let core_start_pos = core_index.0 * systematic_threshold;
	Ok(ChunkIndex((core_start_pos + validator_index.0) % n_validators as u32))
}
/// Compute the per-core availability chunk indices. Returns a Vec which maps ValidatorIndex to
/// ChunkIndex for a given availability core index
/// WARNING: THIS FUNCTION IS CRITICAL TO TEYRCHAIN CONSENSUS.
/// Any modification to the output of the function needs to be coordinated via the
/// runtime. It's best to use minimal/no external dependencies.
pub fn availability_chunk_indices(
	node_features: &NodeFeatures,
	n_validators: usize,
	core_index: CoreIndex,
) -> Result<Vec<ChunkIndex>, pezkuwi_erasure_coding::Error> {
	// Identity mapping: validator `i` holds chunk `i`.
	let identity = (0..n_validators).map(|index| ChunkIndex(index as u32));

	// Look up the chunk-mapping feature bit; missing bits count as disabled.
	let mapping_enabled = node_features
		.get(usize::from(node_features::FeatureIndex::AvailabilityChunkMapping as u8))
		.map(|bitref| *bitref)
		.unwrap_or_default();

	if !mapping_enabled {
		return Ok(identity.collect());
	}

	// Feature enabled: rotate the identity mapping by the per-core start position.
	// Cycling the iterator implements the wrap-around modulo `n_validators`, which
	// matches `availability_chunk_index` element-wise.
	let systematic_threshold = systematic_recovery_threshold(n_validators)? as u32;
	let core_start_pos = (core_index.0 * systematic_threshold) as usize;
	Ok(identity.cycle().skip(core_start_pos).take(n_validators).collect())
}
#[cfg(test)]
mod tests {
	use super::*;
	use std::collections::HashSet;

	/// Build a `NodeFeatures` bitfield with only the chunk-mapping feature enabled.
	pub fn node_features_with_mapping_enabled() -> NodeFeatures {
		let mut node_features = NodeFeatures::new();
		node_features
			.resize(node_features::FeatureIndex::AvailabilityChunkMapping as usize + 1, false);
		node_features
			.set(node_features::FeatureIndex::AvailabilityChunkMapping as u8 as usize, true);
		node_features
	}

	/// Build a `NodeFeatures` bitfield with every feature bit set EXCEPT the
	/// chunk-mapping one, to check that unrelated bits don't trigger the mapping.
	pub fn node_features_with_other_bits_enabled() -> NodeFeatures {
		let mut node_features = NodeFeatures::new();
		node_features.resize(node_features::FeatureIndex::FirstUnassigned as usize + 1, true);
		node_features
			.set(node_features::FeatureIndex::AvailabilityChunkMapping as u8 as usize, false);
		node_features
	}

	#[test]
	fn test_availability_chunk_indices() {
		let n_validators = 20u32;
		let n_cores = 15u32;

		// If the mapping feature is not enabled, it should always be the identity vector.
		{
			for node_features in [NodeFeatures::EMPTY, node_features_with_other_bits_enabled()] {
				for core_index in 0..n_cores {
					let indices = availability_chunk_indices(
						node_features.as_ref(),
						n_validators as usize,
						CoreIndex(core_index),
					)
					.unwrap();

					// The per-validator function must agree with the per-core vector
					// element-wise.
					for validator_index in 0..n_validators {
						assert_eq!(
							indices[validator_index as usize],
							availability_chunk_index(
								node_features.as_ref(),
								n_validators as usize,
								CoreIndex(core_index),
								ValidatorIndex(validator_index)
							)
							.unwrap()
						)
					}

					assert_eq!(
						indices,
						(0..n_validators).map(|i| ChunkIndex(i)).collect::<Vec<_>>()
					);
				}
			}
		}

		// Test when mapping feature is enabled.
		{
			let node_features = node_features_with_mapping_enabled();
			let mut previous_indices = None;

			for core_index in 0..n_cores {
				let indices = availability_chunk_indices(
					&node_features,
					n_validators as usize,
					CoreIndex(core_index),
				)
				.unwrap();

				// The per-validator function must agree with the per-core vector
				// element-wise.
				for validator_index in 0..n_validators {
					assert_eq!(
						indices[validator_index as usize],
						availability_chunk_index(
							&node_features,
							n_validators as usize,
							CoreIndex(core_index),
							ValidatorIndex(validator_index)
						)
						.unwrap()
					)
				}

				// Check that it's not equal to the previous core's indices.
				if let Some(previous_indices) = previous_indices {
					assert_ne!(previous_indices, indices);
				}

				previous_indices = Some(indices.clone());

				// Check that it's indeed a permutation.
				assert_eq!(
					(0..n_validators).map(|i| ChunkIndex(i)).collect::<HashSet<_>>(),
					indices.into_iter().collect::<HashSet<_>>()
				);
			}
		}
	}

	#[test]
	// This is just a dummy test that checks the mapping against some hardcoded outputs, to prevent
	// accidental changes to the algorithms.
	fn prevent_changes_to_mapping() {
		let n_validators = 7;
		let node_features = node_features_with_mapping_enabled();

		// With 7 validators the systematic threshold is 2, so each core rotates the
		// identity vector by 2 more positions than the previous one (mod 7).
		assert_eq!(
			availability_chunk_indices(&node_features, n_validators, CoreIndex(0))
				.unwrap()
				.into_iter()
				.map(|i| i.0)
				.collect::<Vec<u32>>(),
			vec![0, 1, 2, 3, 4, 5, 6]
		);
		assert_eq!(
			availability_chunk_indices(&node_features, n_validators, CoreIndex(1))
				.unwrap()
				.into_iter()
				.map(|i| i.0)
				.collect::<Vec<u32>>(),
			vec![2, 3, 4, 5, 6, 0, 1]
		);
		assert_eq!(
			availability_chunk_indices(&node_features, n_validators, CoreIndex(2))
				.unwrap()
				.into_iter()
				.map(|i| i.0)
				.collect::<Vec<u32>>(),
			vec![4, 5, 6, 0, 1, 2, 3]
		);
		assert_eq!(
			availability_chunk_indices(&node_features, n_validators, CoreIndex(3))
				.unwrap()
				.into_iter()
				.map(|i| i.0)
				.collect::<Vec<u32>>(),
			vec![6, 0, 1, 2, 3, 4, 5]
		);
		assert_eq!(
			availability_chunk_indices(&node_features, n_validators, CoreIndex(4))
				.unwrap()
				.into_iter()
				.map(|i| i.0)
				.collect::<Vec<u32>>(),
			vec![1, 2, 3, 4, 5, 6, 0]
		);
	}
}
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,53 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! `ControlledValidatorIndices` implementation.
use pezkuwi_primitives::{IndexedVec, SessionIndex, ValidatorId, ValidatorIndex};
use schnellru::{ByLength, LruMap};
use sp_keystore::KeystorePtr;
/// Keeps track of the validator indices controlled by the local validator in a given session. For
/// better performance, the values for each session are cached.
pub struct ControlledValidatorIndices {
	/// The indices of the controlled validators, cached by session.
	/// The cached value is itself an `Option`: `None` means a lookup was performed for
	/// that session and no controlled key was found, so the negative result is cached too.
	controlled_validator_indices: LruMap<SessionIndex, Option<ValidatorIndex>>,
	/// Keystore used to determine which of the session's validator keys we control.
	keystore: KeystorePtr,
}
impl ControlledValidatorIndices {
	/// Create a new instance of `ControlledValidatorIndices`.
	///
	/// `cache_size` is the maximum number of sessions for which lookups are cached.
	pub fn new(keystore: KeystorePtr, cache_size: u32) -> Self {
		Self {
			controlled_validator_indices: LruMap::new(ByLength::new(cache_size)),
			keystore,
		}
	}

	/// Get the controlled validator indices for a given session. If the indices are not known they
	/// will be fetched from `session_validators` and cached.
	pub fn get(
		&mut self,
		session: SessionIndex,
		session_validators: &IndexedVec<ValidatorIndex, ValidatorId>,
	) -> Option<ValidatorIndex> {
		// Borrow the keystore separately so the closure does not capture `self`
		// while the cache is mutably borrowed (disjoint field borrows).
		let keystore = &self.keystore;
		let cached = self.controlled_validator_indices.get_or_insert(session, || {
			// Cache miss: scan the session validators for a key we control.
			crate::signing_key_and_index(session_validators.iter(), keystore)
				.map(|(_, index)| index)
		});
		// `get_or_insert` only yields `None` when insertion is impossible
		// (zero-capacity cache), which is a configuration bug.
		cached.copied().expect("We just inserted the controlled indices; qed")
	}
}
+310
View File
@@ -0,0 +1,310 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Database trait for pezkuwi db.
pub use kvdb::{DBKeyValue, DBTransaction, DBValue, KeyValueDB};
/// Database trait with ordered key capacity.
///
/// Extends `KeyValueDB` with the ability to ask whether a column supports
/// ordered (prefix-based) operations.
pub trait Database: KeyValueDB {
	/// Check if column allows content iteration
	/// and removal by prefix.
	fn is_indexed_column(&self, col: u32) -> bool;
}
/// Implementation for database supporting `KeyValueDB` already.
pub mod kvdb_impl {
	use super::{DBKeyValue, DBTransaction, DBValue, Database, KeyValueDB};
	use kvdb::{DBOp, IoStats, IoStatsKind};
	use std::{collections::BTreeSet, io::Result};

	/// Adapter implementing subsystem database
	/// for `KeyValueDB`.
	#[derive(Clone)]
	pub struct DbAdapter<D> {
		// The wrapped `KeyValueDB` backend; all operations delegate to it.
		db: D,
		// Columns declared as supporting ordered iteration / prefix removal.
		indexed_columns: BTreeSet<u32>,
	}

	impl<D: KeyValueDB> DbAdapter<D> {
		/// Instantiate new subsystem database, with
		/// the columns that allow ordered iteration.
		pub fn new(db: D, indexed_columns: &[u32]) -> Self {
			DbAdapter { db, indexed_columns: indexed_columns.iter().cloned().collect() }
		}

		// Debug-build guard: panics if `col` was not declared as an indexed column.
		// Compiles to a no-op in release builds.
		fn ensure_is_indexed(&self, col: u32) {
			debug_assert!(
				self.is_indexed_column(col),
				"Invalid configuration of database, column {} is not ordered.",
				col
			);
		}

		// Debug-build guard: verifies that every `DeletePrefix` op in the transaction
		// targets an indexed column. Compiles to a no-op in release builds.
		fn ensure_ops_indexing(&self, transaction: &DBTransaction) {
			debug_assert!({
				let mut pass = true;
				for op in &transaction.ops {
					if let DBOp::DeletePrefix { col, .. } = op {
						if !self.is_indexed_column(*col) {
							pass = false;
							break;
						}
					}
				}
				pass
			})
		}
	}

	impl<D: KeyValueDB> Database for DbAdapter<D> {
		fn is_indexed_column(&self, col: u32) -> bool {
			self.indexed_columns.contains(&col)
		}
	}

	// All methods delegate to the inner `KeyValueDB`; prefix/iteration methods
	// additionally assert (in debug builds) that the column is indexed.
	impl<D: KeyValueDB> KeyValueDB for DbAdapter<D> {
		fn transaction(&self) -> DBTransaction {
			self.db.transaction()
		}

		fn get(&self, col: u32, key: &[u8]) -> Result<Option<DBValue>> {
			self.db.get(col, key)
		}

		fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> Result<Option<DBValue>> {
			self.ensure_is_indexed(col);
			self.db.get_by_prefix(col, prefix)
		}

		fn write(&self, transaction: DBTransaction) -> Result<()> {
			self.ensure_ops_indexing(&transaction);
			self.db.write(transaction)
		}

		fn iter<'a>(&'a self, col: u32) -> Box<dyn Iterator<Item = Result<DBKeyValue>> + 'a> {
			self.ensure_is_indexed(col);
			self.db.iter(col)
		}

		fn iter_with_prefix<'a>(
			&'a self,
			col: u32,
			prefix: &'a [u8],
		) -> Box<dyn Iterator<Item = Result<DBKeyValue>> + 'a> {
			self.ensure_is_indexed(col);
			self.db.iter_with_prefix(col, prefix)
		}

		fn io_stats(&self, kind: IoStatsKind) -> IoStats {
			self.db.io_stats(kind)
		}

		fn has_key(&self, col: u32, key: &[u8]) -> Result<bool> {
			self.db.has_key(col, key)
		}

		fn has_prefix(&self, col: u32, prefix: &[u8]) -> Result<bool> {
			self.ensure_is_indexed(col);
			self.db.has_prefix(col, prefix)
		}
	}
}
/// Utilities for using parity-db database.
pub mod paritydb_impl {
	use super::{DBKeyValue, DBTransaction, DBValue, Database, KeyValueDB};
	use kvdb::DBOp;
	use parity_db::Db;
	use parking_lot::Mutex;
	use std::{collections::BTreeSet, io::Result, sync::Arc};

	// Unwrap a parity-db result, panicking on error. Used where a database failure
	// is considered unrecoverable (mid-write iteration).
	fn handle_err<T>(result: parity_db::Result<T>) -> T {
		match result {
			Ok(r) => r,
			Err(e) => {
				panic!("Critical database error: {:?}", e);
			},
		}
	}

	// Convert a parity-db error into the `std::io::Error` required by the
	// `KeyValueDB` interface.
	fn map_err<T>(result: parity_db::Result<T>) -> Result<T> {
		result.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, format!("{:?}", e)))
	}

	/// Implementation of `Database` for parity-db adapter.
	pub struct DbAdapter {
		// The underlying parity-db instance.
		db: Db,
		// Columns declared as supporting ordered iteration / prefix removal.
		indexed_columns: BTreeSet<u32>,
		// Serializes `write` calls; see the comment at the end of `write`.
		write_lock: Arc<Mutex<()>>,
	}

	impl KeyValueDB for DbAdapter {
		fn transaction(&self) -> DBTransaction {
			DBTransaction::new()
		}

		fn get(&self, col: u32, key: &[u8]) -> Result<Option<DBValue>> {
			map_err(self.db.get(col as u8, key))
		}

		// First entry of the prefix iteration, if any.
		fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> Result<Option<DBValue>> {
			self.iter_with_prefix(col, prefix)
				.next()
				.transpose()
				.map(|mb| mb.map(|(_, v)| v))
		}

		fn iter<'a>(&'a self, col: u32) -> Box<dyn Iterator<Item = Result<DBKeyValue>> + 'a> {
			let mut iter = match self.db.iter(col as u8) {
				Ok(iter) => iter,
				// Surface the open error as a single-element error iterator.
				Err(e) => return Box::new(std::iter::once(map_err(Err(e)))),
			};
			Box::new(std::iter::from_fn(move || {
				iter.next().transpose().map(|r| map_err(r.map(|(k, v)| (k.into(), v))))
			}))
		}

		fn iter_with_prefix<'a>(
			&'a self,
			col: u32,
			prefix: &'a [u8],
		) -> Box<dyn Iterator<Item = Result<DBKeyValue>> + 'a> {
			// An empty prefix matches every key.
			if prefix.len() == 0 {
				return self.iter(col);
			}
			let mut iter = match self.db.iter(col as u8) {
				Ok(iter) => iter,
				Err(e) => return Box::new(std::iter::once(map_err(Err(e)))),
			};
			if let Err(e) = iter.seek(prefix) {
				return Box::new(std::iter::once(map_err(Err(e))));
			}
			// Yield entries until the first key that no longer starts with `prefix`
			// (the btree iterator is ordered, so that terminates the range).
			Box::new(std::iter::from_fn(move || {
				iter.next().transpose().and_then(|r| {
					map_err(r.map(|(k, v)| k.starts_with(prefix).then(|| (k.into(), v))))
						.transpose()
				})
			}))
		}

		fn write(&self, transaction: DBTransaction) -> Result<()> {
			let mut ops = transaction.ops.into_iter();
			// TODO using a key iterator or native delete here would be faster.
			// State for lazily expanding a `DeletePrefix` op into per-key deletions:
			// (iterator positioned at the prefix, column, the prefix itself).
			let mut current_prefix_iter: Option<(parity_db::BTreeIterator, u8, Vec<u8>)> = None;
			let current_prefix_iter = &mut current_prefix_iter;
			// Lazy iterator of (col, key, Option<value>) changes consumed by `commit`.
			let transaction = std::iter::from_fn(move || loop {
				// Drain any in-progress prefix deletion first, one key at a time.
				if let Some((prefix_iter, col, prefix)) = current_prefix_iter {
					if let Some((key, _value)) = handle_err(prefix_iter.next()) {
						if key.starts_with(prefix) {
							return Some((*col, key.to_vec(), None));
						}
					}
					// Past the prefix range (or exhausted): fall through to next op.
					*current_prefix_iter = None;
				}
				return match ops.next() {
					None => None,
					Some(DBOp::Insert { col, key, value }) =>
						Some((col as u8, key.to_vec(), Some(value))),
					Some(DBOp::Delete { col, key }) => Some((col as u8, key.to_vec(), None)),
					Some(DBOp::DeletePrefix { col, prefix }) => {
						let col = col as u8;
						let mut iter = handle_err(self.db.iter(col));
						handle_err(iter.seek(&prefix[..]));
						*current_prefix_iter = Some((iter, col, prefix.to_vec()));
						// Start draining the prefix on the next loop iteration.
						continue;
					},
				};
			});
			// Locking is required due to possible racy change of the content of a deleted prefix.
			let _lock = self.write_lock.lock();
			map_err(self.db.commit(transaction))
		}
	}

	impl Database for DbAdapter {
		fn is_indexed_column(&self, col: u32) -> bool {
			self.indexed_columns.contains(&col)
		}
	}

	impl DbAdapter {
		/// Create a new parity-db backed `Database` adapter, with
		/// the columns that allow ordered iteration.
		pub fn new(db: Db, indexed_columns: &[u32]) -> Self {
			let write_lock = Arc::new(Mutex::new(()));
			DbAdapter { db, indexed_columns: indexed_columns.iter().cloned().collect(), write_lock }
		}
	}

	#[cfg(test)]
	mod tests {
		use super::*;
		use kvdb_shared_tests as st;
		use std::io;
		use tempfile::Builder as TempfileBuilder;

		// Open a fresh temporary parity-db with `num_col` btree-indexed columns;
		// column 0 is registered as an indexed column on the adapter.
		fn create(num_col: u32) -> io::Result<(DbAdapter, tempfile::TempDir)> {
			let tempdir = TempfileBuilder::new().prefix("").tempdir()?;
			let mut options = parity_db::Options::with_columns(tempdir.path(), num_col as u8);
			for i in 0..num_col {
				options.columns[i as usize].btree_index = true;
			}
			let db = parity_db::Db::open_or_create(&options)
				.map_err(|err| io::Error::new(io::ErrorKind::Other, format!("{:?}", err)))?;
			let db = DbAdapter::new(db, &[0]);
			Ok((db, tempdir))
		}

		#[test]
		fn put_and_get() -> io::Result<()> {
			let (db, _temp_file) = create(1)?;
			st::test_put_and_get(&db)
		}

		#[test]
		fn delete_and_get() -> io::Result<()> {
			let (db, _temp_file) = create(1)?;
			st::test_delete_and_get(&db)
		}

		#[test]
		fn delete_prefix() -> io::Result<()> {
			let (db, _temp_file) = create(st::DELETE_PREFIX_NUM_COLUMNS)?;
			st::test_delete_prefix(&db)
		}

		#[test]
		fn iter() -> io::Result<()> {
			let (db, _temp_file) = create(1)?;
			st::test_iter(&db)
		}

		#[test]
		fn iter_with_prefix() -> io::Result<()> {
			let (db, _temp_file) = create(1)?;
			st::test_iter_with_prefix(&db)
		}

		#[test]
		fn complex() -> io::Result<()> {
			let (db, _temp_file) = create(1)?;
			st::test_complex(&db)
		}
	}
}
@@ -0,0 +1,601 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! A utility for fetching all unknown blocks based on a new chain-head hash.
use futures::{channel::oneshot, prelude::*};
use pezkuwi_node_subsystem::{messages::ChainApiMessage, SubsystemSender};
use pezkuwi_primitives::{BlockNumber, Hash, Header};
/// Given a new chain-head hash, this determines the hashes of all new blocks we should track
/// metadata for, given this head.
///
/// This is guaranteed to be a subset of the (inclusive) ancestry of `head` determined as all
/// blocks above the lower bound or above the highest known block, whichever is higher.
/// This is formatted in descending order by block height.
///
/// An implication of this is that if `head` itself is known or not above the lower bound,
/// then the returned list will be empty.
///
/// This may be somewhat expensive when first recovering from major sync.
pub async fn determine_new_blocks<E, Sender>(
	sender: &mut Sender,
	is_known: impl Fn(&Hash) -> Result<bool, E>,
	head: Hash,
	header: &Header,
	lower_bound_number: BlockNumber,
) -> Result<Vec<(Hash, Header)>, E>
where
	Sender: SubsystemSender<ChainApiMessage>,
{
	// Number of ancestors requested from the Chain API per batch.
	const ANCESTRY_STEP: usize = 4;
	// Lowest block number that is still of interest.
	let min_block_needed = lower_bound_number + 1;

	// Early exit if the block is in the DB or too early.
	{
		let already_known = is_known(&head)?;

		let before_relevant = header.number < min_block_needed;

		if already_known || before_relevant {
			return Ok(Vec::new());
		}
	}

	let mut ancestry = vec![(head, header.clone())];

	// Early exit if the parent hash is in the DB or no further blocks
	// are needed.
	if is_known(&header.parent_hash)? || header.number == min_block_needed {
		return Ok(ancestry);
	}

	'outer: loop {
		let (last_hash, last_header) = ancestry
			.last()
			.expect("ancestry has length 1 at initialization and is only added to; qed");

		assert!(
			last_header.number > min_block_needed,
			"Loop invariant: the last block in ancestry is checked to be \
			above the minimum before the loop, and at the end of each iteration; \
			qed"
		);

		let (tx, rx) = oneshot::channel();

		// This is always non-zero as determined by the loop invariant
		// above.
		let ancestry_step =
			std::cmp::min(ANCESTRY_STEP, (last_header.number - min_block_needed) as usize);

		let batch_hashes = if ancestry_step == 1 {
			// A single ancestor is just the parent hash — no Chain API round trip needed.
			vec![last_header.parent_hash]
		} else {
			sender
				.send_message(
					ChainApiMessage::Ancestors {
						hash: *last_hash,
						k: ancestry_step,
						response_channel: tx,
					}
					.into(),
				)
				.await;

			// Continue past these errors.
			match rx.await {
				Err(_) | Ok(Err(_)) => break 'outer,
				Ok(Ok(ancestors)) => ancestors,
			}
		};

		let batch_headers = {
			// Fire off all header requests up front, then collect responses
			// in request order via `FuturesOrdered`.
			let (batch_senders, batch_receivers) = (0..batch_hashes.len())
				.map(|_| oneshot::channel())
				.unzip::<_, _, Vec<_>, Vec<_>>();

			for (hash, batched_sender) in batch_hashes.iter().cloned().zip(batch_senders) {
				sender
					.send_message(ChainApiMessage::BlockHeader(hash, batched_sender).into())
					.await;
			}

			let mut requests = futures::stream::FuturesOrdered::new();
			batch_receivers
				.into_iter()
				.map(|rx| async move {
					match rx.await {
						Err(_) | Ok(Err(_)) => None,
						Ok(Ok(h)) => h,
					}
				})
				.for_each(|x| requests.push_back(x));

			let batch_headers: Vec<_> =
				requests.flat_map(|x: Option<Header>| stream::iter(x)).collect().await;

			// Any failed header fetch of the batch will yield a `None` result that will
			// be skipped. Any failure at this stage means we'll just ignore those blocks
			// as the chain DB has failed us.
			if batch_headers.len() != batch_hashes.len() {
				break 'outer;
			}
			batch_headers
		};

		// Walk the batch (descending height), appending until we hit a known
		// block or the lower bound.
		for (hash, header) in batch_hashes.into_iter().zip(batch_headers) {
			let is_known = is_known(&hash)?;

			let is_relevant = header.number >= min_block_needed;
			let is_terminating = header.number == min_block_needed;

			if is_known || !is_relevant {
				break 'outer;
			}

			ancestry.push((hash, header));

			if is_terminating {
				break 'outer;
			}
		}
	}

	Ok(ancestry)
}
#[cfg(test)]
mod tests {
	use super::*;
	use assert_matches::assert_matches;
	use pezkuwi_node_subsystem_test_helpers::make_subsystem_context;
	use pezkuwi_overseer::{AllMessages, SubsystemContext};
	use sp_core::testing::TaskExecutor;
	use std::collections::{HashMap, HashSet};

	/// Simple in-memory "known blocks" set backing the `is_known` closure.
	#[derive(Default)]
	struct TestKnownBlocks {
		blocks: HashSet<Hash>,
	}

	impl TestKnownBlocks {
		fn insert(&mut self, hash: Hash) {
			self.blocks.insert(hash);
		}

		fn is_known(&self, hash: &Hash) -> Result<bool, ()> {
			Ok(self.blocks.contains(hash))
		}
	}

	/// A linear test chain starting at `start_number`, with lookups by
	/// number and by hash.
	#[derive(Clone)]
	struct TestChain {
		start_number: BlockNumber,
		headers: Vec<Header>,
		numbers: HashMap<Hash, BlockNumber>,
	}

	impl TestChain {
		fn new(start: BlockNumber, len: usize) -> Self {
			assert!(len > 0, "len must be at least 1");

			let base = Header {
				digest: Default::default(),
				extrinsics_root: Default::default(),
				number: start,
				state_root: Default::default(),
				parent_hash: Default::default(),
			};

			let base_hash = base.hash();

			let mut chain = TestChain {
				start_number: start,
				headers: vec![base],
				numbers: vec![(base_hash, start)].into_iter().collect(),
			};

			for _ in 1..len {
				chain.grow()
			}

			chain
		}

		// Append one block on top of the current tip.
		fn grow(&mut self) {
			let next = {
				let last = self.headers.last().unwrap();
				Header {
					digest: Default::default(),
					extrinsics_root: Default::default(),
					number: last.number + 1,
					state_root: Default::default(),
					parent_hash: last.hash(),
				}
			};

			self.numbers.insert(next.hash(), next.number);
			self.headers.push(next);
		}

		fn header_by_number(&self, number: BlockNumber) -> Option<&Header> {
			if number < self.start_number {
				None
			} else {
				self.headers.get((number - self.start_number) as usize)
			}
		}

		fn header_by_hash(&self, hash: &Hash) -> Option<&Header> {
			self.numbers.get(hash).and_then(|n| self.header_by_number(*n))
		}

		fn hash_by_number(&self, number: BlockNumber) -> Option<Hash> {
			self.header_by_number(number).map(|h| h.hash())
		}

		// Hashes of up to `k` ancestors of `hash`, closest-first — mirrors the
		// `ChainApiMessage::Ancestors` response.
		fn ancestry(&self, hash: &Hash, k: BlockNumber) -> Vec<Hash> {
			let n = match self.numbers.get(hash) {
				None => return Vec::new(),
				Some(&n) => n,
			};

			(0..k)
				.map(|i| i + 1)
				.filter_map(|i| self.header_by_number(n - i))
				.map(|h| h.hash())
				.collect()
		}
	}

	#[test]
	fn determine_new_blocks_back_to_lower_bound() {
		let pool = TaskExecutor::new();
		let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone());

		let known = TestKnownBlocks::default();

		let chain = TestChain::new(10, 9);

		let head = chain.header_by_number(18).unwrap().clone();
		let head_hash = head.hash();
		let lower_bound_number = 12;

		// Finalized block should be omitted. The head provided to `determine_new_blocks`
		// should be included.
		let expected_ancestry = (13..=18)
			.map(|n| chain.header_by_number(n).map(|h| (h.hash(), h.clone())).unwrap())
			.rev()
			.collect::<Vec<_>>();

		let test_fut = Box::pin(async move {
			let ancestry = determine_new_blocks(
				ctx.sender(),
				|h| known.is_known(h),
				head_hash,
				&head,
				lower_bound_number,
			)
			.await
			.unwrap();

			assert_eq!(ancestry, expected_ancestry);
		});

		// Mock Chain API side: serve the ancestry request and the header requests.
		let aux_fut = Box::pin(async move {
			assert_matches!(
				handle.recv().await,
				AllMessages::ChainApi(ChainApiMessage::Ancestors {
					hash: h,
					k,
					response_channel: tx,
				}) => {
					assert_eq!(h, head_hash);
					assert_eq!(k, 4);
					let _ = tx.send(Ok(chain.ancestry(&h, k as _)));
				}
			);

			for _ in 0u32..4 {
				assert_matches!(
					handle.recv().await,
					AllMessages::ChainApi(ChainApiMessage::BlockHeader(h, tx)) => {
						let _ = tx.send(Ok(chain.header_by_hash(&h).map(|h| h.clone())));
					}
				);
			}

			// The last step is a single-parent fetch (step size 1).
			assert_matches!(
				handle.recv().await,
				AllMessages::ChainApi(ChainApiMessage::BlockHeader(h, tx)) => {
					assert_eq!(h, chain.hash_by_number(13).unwrap());
					let _ = tx.send(Ok(chain.header_by_hash(&h).map(|h| h.clone())));
				}
			);
		});

		futures::executor::block_on(futures::future::join(test_fut, aux_fut));
	}

	#[test]
	fn determine_new_blocks_back_to_known() {
		let pool = TaskExecutor::new();
		let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone());

		let mut known = TestKnownBlocks::default();

		let chain = TestChain::new(10, 9);

		let head = chain.header_by_number(18).unwrap().clone();
		let head_hash = head.hash();
		let lower_bound_number = 12;
		let known_number = 15;
		let known_hash = chain.hash_by_number(known_number).unwrap();

		known.insert(known_hash);

		// Known block should be omitted. The head provided to `determine_new_blocks`
		// should be included.
		let expected_ancestry = (16..=18)
			.map(|n| chain.header_by_number(n).map(|h| (h.hash(), h.clone())).unwrap())
			.rev()
			.collect::<Vec<_>>();

		let test_fut = Box::pin(async move {
			let ancestry = determine_new_blocks(
				ctx.sender(),
				|h| known.is_known(h),
				head_hash,
				&head,
				lower_bound_number,
			)
			.await
			.unwrap();

			assert_eq!(ancestry, expected_ancestry);
		});

		let aux_fut = Box::pin(async move {
			assert_matches!(
				handle.recv().await,
				AllMessages::ChainApi(ChainApiMessage::Ancestors {
					hash: h,
					k,
					response_channel: tx,
				}) => {
					assert_eq!(h, head_hash);
					assert_eq!(k, 4);
					let _ = tx.send(Ok(chain.ancestry(&h, k as _)));
				}
			);

			for _ in 0u32..4 {
				assert_matches!(
					handle.recv().await,
					AllMessages::ChainApi(ChainApiMessage::BlockHeader(h, tx)) => {
						let _ = tx.send(Ok(chain.header_by_hash(&h).map(|h| h.clone())));
					}
				);
			}
		});

		futures::executor::block_on(futures::future::join(test_fut, aux_fut));
	}

	#[test]
	fn determine_new_blocks_already_known_is_empty() {
		let pool = TaskExecutor::new();
		let (mut ctx, _handle) = make_subsystem_context::<(), _>(pool.clone());

		let mut known = TestKnownBlocks::default();

		let chain = TestChain::new(10, 9);

		let head = chain.header_by_number(18).unwrap().clone();
		let head_hash = head.hash();
		let lower_bound_number = 0;

		known.insert(head_hash);

		// Known block should be omitted.
		let expected_ancestry = Vec::new();

		let test_fut = Box::pin(async move {
			let ancestry = determine_new_blocks(
				ctx.sender(),
				|h| known.is_known(h),
				head_hash,
				&head,
				lower_bound_number,
			)
			.await
			.unwrap();

			assert_eq!(ancestry, expected_ancestry);
		});

		futures::executor::block_on(test_fut);
	}

	#[test]
	fn determine_new_blocks_parent_known_is_fast() {
		let pool = TaskExecutor::new();
		let (mut ctx, _handle) = make_subsystem_context::<(), _>(pool.clone());

		let mut known = TestKnownBlocks::default();

		let chain = TestChain::new(10, 9);

		let head = chain.header_by_number(18).unwrap().clone();
		let head_hash = head.hash();
		let lower_bound_number = 0;
		let parent_hash = chain.hash_by_number(17).unwrap();

		known.insert(parent_hash);

		// New block should be the only new one.
		let expected_ancestry = vec![(head_hash, head.clone())];

		let test_fut = Box::pin(async move {
			let ancestry = determine_new_blocks(
				ctx.sender(),
				|h| known.is_known(h),
				head_hash,
				&head,
				lower_bound_number,
			)
			.await
			.unwrap();

			assert_eq!(ancestry, expected_ancestry);
		});

		futures::executor::block_on(test_fut);
	}

	#[test]
	fn determine_new_block_before_finality_is_empty() {
		let pool = TaskExecutor::new();
		let (mut ctx, _handle) = make_subsystem_context::<(), _>(pool.clone());

		let chain = TestChain::new(10, 9);

		let head = chain.header_by_number(18).unwrap().clone();
		let head_hash = head.hash();
		let parent_hash = chain.hash_by_number(17).unwrap();

		let mut known = TestKnownBlocks::default();

		known.insert(parent_hash);

		// Check behavior around the lower bound: head just above, at, and below it.
		let test_fut = Box::pin(async move {
			let after_finality =
				determine_new_blocks(ctx.sender(), |h| known.is_known(h), head_hash, &head, 17)
					.await
					.unwrap();

			let at_finality =
				determine_new_blocks(ctx.sender(), |h| known.is_known(h), head_hash, &head, 18)
					.await
					.unwrap();

			let before_finality =
				determine_new_blocks(ctx.sender(), |h| known.is_known(h), head_hash, &head, 19)
					.await
					.unwrap();

			assert_eq!(after_finality, vec![(head_hash, head.clone())]);

			assert_eq!(at_finality, Vec::new());

			assert_eq!(before_finality, Vec::new());
		});

		futures::executor::block_on(test_fut);
	}

	#[test]
	fn determine_new_blocks_does_not_request_genesis() {
		let pool = TaskExecutor::new();
		let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone());

		let chain = TestChain::new(1, 2);

		let head = chain.header_by_number(2).unwrap().clone();
		let head_hash = head.hash();

		let known = TestKnownBlocks::default();

		let expected_ancestry = (1..=2)
			.map(|n| chain.header_by_number(n).map(|h| (h.hash(), h.clone())).unwrap())
			.rev()
			.collect::<Vec<_>>();

		let test_fut = Box::pin(async move {
			let ancestry =
				determine_new_blocks(ctx.sender(), |h| known.is_known(h), head_hash, &head, 0)
					.await
					.unwrap();

			assert_eq!(ancestry, expected_ancestry);
		});

		// Only a single header request is expected — no `Ancestors` request that
		// could reach back to (non-existent ancestors of) genesis.
		let aux_fut = Box::pin(async move {
			assert_matches!(
				handle.recv().await,
				AllMessages::ChainApi(ChainApiMessage::BlockHeader(h, tx)) => {
					assert_eq!(h, chain.hash_by_number(1).unwrap());
					let _ = tx.send(Ok(chain.header_by_hash(&h).map(|h| h.clone())));
				}
			);
		});

		futures::executor::block_on(futures::future::join(test_fut, aux_fut));
	}

	#[test]
	fn determine_new_blocks_does_not_request_genesis_even_in_multi_ancestry() {
		let pool = TaskExecutor::new();
		let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone());

		let chain = TestChain::new(1, 3);

		let head = chain.header_by_number(3).unwrap().clone();
		let head_hash = head.hash();

		let known = TestKnownBlocks::default();

		let expected_ancestry = (1..=3)
			.map(|n| chain.header_by_number(n).map(|h| (h.hash(), h.clone())).unwrap())
			.rev()
			.collect::<Vec<_>>();

		let test_fut = Box::pin(async move {
			let ancestry =
				determine_new_blocks(ctx.sender(), |h| known.is_known(h), head_hash, &head, 0)
					.await
					.unwrap();

			assert_eq!(ancestry, expected_ancestry);
		});

		// The ancestry step must be clamped to 2 so the request stops at block 1.
		let aux_fut = Box::pin(async move {
			assert_matches!(
				handle.recv().await,
				AllMessages::ChainApi(ChainApiMessage::Ancestors {
					hash: h,
					k,
					response_channel: tx,
				}) => {
					assert_eq!(h, head_hash);
					assert_eq!(k, 2);
					let _ = tx.send(Ok(chain.ancestry(&h, k as _)));
				}
			);

			for _ in 0_u8..2 {
				assert_matches!(
					handle.recv().await,
					AllMessages::ChainApi(ChainApiMessage::BlockHeader(h, tx)) => {
						let _ = tx.send(Ok(chain.header_by_hash(&h).map(|h| h.clone())));
					}
				);
			}
		});

		futures::executor::block_on(futures::future::join(test_fut, aux_fut));
	}
}
File diff suppressed because it is too large Load Diff
+545
View File
@@ -0,0 +1,545 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Utility module for subsystems
//!
//! Many subsystems have common interests such as canceling a bunch of spawned jobs,
//! or determining what their validator ID is. These common interests are factored into
//! this module.
//!
//! This crate also reexports Prometheus metric types which are expected to be implemented by
//! subsystems.
#![warn(missing_docs)]
pub use overseer::{
gen::{OrchestraError as OverseerError, Timeout},
Subsystem, TimeoutExt,
};
use pezkuwi_node_subsystem::{
errors::{RuntimeApiError, SubsystemError},
messages::{RuntimeApiMessage, RuntimeApiRequest, RuntimeApiSender},
overseer, SubsystemSender,
};
pub use pezkuwi_node_metrics::{metrics, Metronome};
use codec::Encode;
use futures::channel::{mpsc, oneshot};
use pezkuwi_primitives::{
async_backing::{BackingState, Constraints},
slashing, AsyncBackingParams, AuthorityDiscoveryId, CandidateEvent, CandidateHash,
CommittedCandidateReceiptV2 as CommittedCandidateReceipt, CoreIndex, CoreState, EncodeAs,
ExecutorParams, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, NodeFeatures,
OccupiedCoreAssumption, PersistedValidationData, ScrapedOnChainVotes, SessionIndex,
SessionInfo, Signed, SigningContext, ValidationCode, ValidationCodeHash, ValidatorId,
ValidatorIndex, ValidatorSignature,
};
pub use rand;
use sp_application_crypto::AppCrypto;
use sp_core::ByteArray;
use sp_keystore::{Error as KeystoreError, KeystorePtr};
use std::{
collections::{BTreeMap, VecDeque},
time::Duration,
};
use thiserror::Error;
pub use determine_new_blocks::determine_new_blocks;
pub use metered;
pub use pezkuwi_node_network_protocol::MIN_GOSSIP_PEERS;
/// These reexports are required so that external crates can use the `delegated_subsystem` macro
/// properly.
pub mod reexports {
pub use pezkuwi_overseer::gen::{SpawnedSubsystem, Spawner, Subsystem, SubsystemContext};
}
/// Helpers for the validator->chunk index mapping.
pub mod availability_chunks;
/// A utility for managing the implicit view of the relay-chain derived from active
/// leaves and the minimum allowed relay-parents that teyrchain candidates can have
/// and be backed in those leaves' children.
pub mod backing_implicit_view;
/// Database trait for subsystem.
pub mod database;
/// An emulator for node-side code to predict the results of on-chain teyrchain inclusion
/// and predict future constraints.
pub mod inclusion_emulator;
/// Convenient and efficient runtime info access.
pub mod runtime;
/// Helpers for working with unreleased runtime calls
pub mod vstaging;
/// Nested message sending
///
/// Useful for having mostly synchronous code, with submodules spawning short lived asynchronous
/// tasks, sending messages back.
pub mod nesting_sender;
pub mod reputation;
mod determine_new_blocks;
mod controlled_validator_indices;
pub use controlled_validator_indices::ControlledValidatorIndices;
#[cfg(test)]
mod tests;
/// Log target for tracing output of this crate.
// `'static` is implied for `&str` constants, so the explicit lifetime is redundant
// (clippy::redundant_static_lifetimes).
const LOG_TARGET: &str = "teyrchain::subsystem-util";

/// Duration a job will wait after sending a stop signal before hard-aborting.
pub const JOB_GRACEFUL_STOP_DURATION: Duration = Duration::from_secs(1);

/// Capacity of channels to and from individual jobs
pub const JOB_CHANNEL_CAPACITY: usize = 64;
/// Utility errors shared by subsystem jobs.
#[derive(Debug, Error)]
pub enum Error {
	/// Attempted to send or receive on a oneshot channel which had been canceled
	#[error(transparent)]
	Oneshot(#[from] oneshot::Canceled),
	/// Attempted to send on a MPSC channel which has been canceled
	#[error(transparent)]
	Mpsc(#[from] mpsc::SendError),
	/// A subsystem error
	#[error(transparent)]
	Subsystem(#[from] SubsystemError),
	/// An error in the Runtime API.
	#[error(transparent)]
	RuntimeApi(#[from] RuntimeApiError),
	/// The type system wants this even though it doesn't make sense
	#[error(transparent)]
	Infallible(#[from] std::convert::Infallible),
	/// Attempted to convert from an `AllMessages` to a `FromJob`, and failed.
	#[error("AllMessage not relevant to Job")]
	SenderConversion(String),
	/// The local node is not a validator.
	#[error("Node is not a validator")]
	NotAValidator,
	/// Already forwarding errors to another sender
	#[error("AlreadyForwarding")]
	AlreadyForwarding,
	/// Data that is supposed to be there is not there
	#[error("Data are not available")]
	DataNotAvailable,
}
impl From<OverseerError> for Error {
	fn from(e: OverseerError) -> Self {
		// Route overseer errors through `SubsystemError`, landing in the `Subsystem` variant.
		Self::Subsystem(SubsystemError::from(e))
	}
}
impl TryFrom<crate::runtime::Error> for Error {
type Error = ();
fn try_from(e: crate::runtime::Error) -> Result<Self, ()> {
use crate::runtime::Error;
match e {
Error::RuntimeRequestCanceled(e) => Ok(Self::Oneshot(e)),
Error::RuntimeRequest(e) => Ok(Self::RuntimeApi(e)),
Error::NoSuchSession(_) | Error::NoExecutorParams(_) => Err(()),
}
}
}
/// A type alias for Runtime API receivers.
///
/// The outer oneshot fails if the responder is dropped; the inner `Result` carries the
/// outcome of the runtime API call itself.
pub type RuntimeApiReceiver<T> = oneshot::Receiver<Result<T, RuntimeApiError>>;
/// Request some data from the `RuntimeApi`.
///
/// Builds the request with `request_builder`, dispatches it to the runtime API
/// subsystem and returns the receiver on which the response will arrive.
pub async fn request_from_runtime<RequestBuilder, Response, Sender>(
	parent: Hash,
	sender: &mut Sender,
	request_builder: RequestBuilder,
) -> RuntimeApiReceiver<Response>
where
	RequestBuilder: FnOnce(RuntimeApiSender<Response>) -> RuntimeApiRequest,
	Sender: SubsystemSender<RuntimeApiMessage>,
{
	// Hand the response side of a fresh oneshot to the request and ship it off.
	let (tx, rx) = oneshot::channel();
	let request = request_builder(tx);
	sender.send_message(RuntimeApiMessage::Request(parent, request).into()).await;

	rx
}
/// Verifies if `TeyrchainHost` runtime api is at least at version `required_runtime_version`. This
/// method is used to determine if a given runtime call is supported by the runtime.
pub async fn has_required_runtime<Sender>(
sender: &mut Sender,
relay_parent: Hash,
required_runtime_version: u32,
) -> bool
where
Sender: SubsystemSender<RuntimeApiMessage>,
{
gum::trace!(target: LOG_TARGET, ?relay_parent, "Fetching TeyrchainHost runtime api version");
let (tx, rx) = oneshot::channel();
sender
.send_message(RuntimeApiMessage::Request(relay_parent, RuntimeApiRequest::Version(tx)))
.await;
match rx.await {
Result::Ok(Ok(runtime_version)) => {
gum::trace!(
target: LOG_TARGET,
?relay_parent,
?runtime_version,
?required_runtime_version,
"Fetched TeyrchainHost runtime api version"
);
runtime_version >= required_runtime_version
},
Result::Ok(Err(RuntimeApiError::Execution { source: error, .. })) => {
gum::trace!(
target: LOG_TARGET,
?relay_parent,
?error,
"Execution error while fetching TeyrchainHost runtime api version"
);
false
},
Result::Ok(Err(RuntimeApiError::NotSupported { .. })) => {
gum::trace!(
target: LOG_TARGET,
?relay_parent,
"NotSupported error while fetching TeyrchainHost runtime api version"
);
false
},
Result::Err(_) => {
gum::trace!(
target: LOG_TARGET,
?relay_parent,
"Cancelled error while fetching TeyrchainHost runtime api version"
);
false
},
}
}
/// Construct specialized request functions for the runtime.
///
/// These would otherwise get pretty repetitive.
///
/// Each `fn name(params…) -> Ret; Variant;` item expands to a `pub async fn` that wraps
/// [`request_from_runtime`] with the matching `RuntimeApiRequest::Variant`.
macro_rules! specialize_requests {
	// Entry arm: expand the return type name (via `stringify!`) so it can be spliced
	// into the generated function's doc string, then delegate to the `named` arm.
	(fn $func_name:ident( $( $param_name:ident : $param_ty:ty ),* ) -> $return_ty:ty ; $request_variant:ident;) => {
		specialize_requests!{
			named stringify!($request_variant) ; fn $func_name( $( $param_name : $param_ty ),* ) -> $return_ty ; $request_variant;
		}
	};
	// Worker arm: create a single specialized request function. The generated function
	// takes `parent` plus the declared parameters and returns the request's receiver.
	(named $doc_name:expr ; fn $func_name:ident( $( $param_name:ident : $param_ty:ty ),* ) -> $return_ty:ty ; $request_variant:ident;) => {
		#[doc = "Request `"]
		#[doc = $doc_name]
		#[doc = "` from the runtime"]
		pub async fn $func_name (
			parent: Hash,
			$(
				$param_name: $param_ty,
			)*
			sender: &mut impl overseer::SubsystemSender<RuntimeApiMessage>,
		) -> RuntimeApiReceiver<$return_ty>
		{
			request_from_runtime(parent, sender, |tx| RuntimeApiRequest::$request_variant(
				$( $param_name, )* tx
			)).await
		}
	};
	// Recursive arm: peel off the first item and recurse on the rest, so a whole list
	// of declarations can be passed in one invocation.
	(
		fn $func_name:ident( $( $param_name:ident : $param_ty:ty ),* ) -> $return_ty:ty ; $request_variant:ident;
		$(
			fn $t_func_name:ident( $( $t_param_name:ident : $t_param_ty:ty ),* ) -> $t_return_ty:ty ; $t_request_variant:ident;
		)+
	) => {
		specialize_requests!{
			fn $func_name( $( $param_name : $param_ty ),* ) -> $return_ty ; $request_variant ;
		}
		specialize_requests!{
			$(
				fn $t_func_name( $( $t_param_name : $t_param_ty ),* ) -> $t_return_ty ; $t_request_variant ;
			)+
		}
	};
}
// One `request_*` function is generated per runtime API request variant below; see the
// `specialize_requests` macro above for the expansion.
// (Fixed: missing space before `SessionExecutorParams` to match the other entries.)
specialize_requests! {
	fn request_runtime_api_version() -> u32; Version;
	fn request_authorities() -> Vec<AuthorityDiscoveryId>; Authorities;
	fn request_validators() -> Vec<ValidatorId>; Validators;
	fn request_validator_groups() -> (Vec<Vec<ValidatorIndex>>, GroupRotationInfo); ValidatorGroups;
	fn request_availability_cores() -> Vec<CoreState>; AvailabilityCores;
	fn request_persisted_validation_data(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option<PersistedValidationData>; PersistedValidationData;
	fn request_assumed_validation_data(para_id: ParaId, expected_persisted_validation_data_hash: Hash) -> Option<(PersistedValidationData, ValidationCodeHash)>; AssumedValidationData;
	fn request_session_index_for_child() -> SessionIndex; SessionIndexForChild;
	fn request_validation_code(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option<ValidationCode>; ValidationCode;
	fn request_validation_code_by_hash(validation_code_hash: ValidationCodeHash) -> Option<ValidationCode>; ValidationCodeByHash;
	fn request_candidate_pending_availability(para_id: ParaId) -> Option<CommittedCandidateReceipt>; CandidatePendingAvailability;
	fn request_candidates_pending_availability(para_id: ParaId) -> Vec<CommittedCandidateReceipt>; CandidatesPendingAvailability;
	fn request_candidate_events() -> Vec<CandidateEvent>; CandidateEvents;
	fn request_session_info(index: SessionIndex) -> Option<SessionInfo>; SessionInfo;
	fn request_validation_code_hash(para_id: ParaId, assumption: OccupiedCoreAssumption)
		-> Option<ValidationCodeHash>; ValidationCodeHash;
	fn request_on_chain_votes() -> Option<ScrapedOnChainVotes>; FetchOnChainVotes;
	fn request_session_executor_params(session_index: SessionIndex) -> Option<ExecutorParams>; SessionExecutorParams;
	fn request_unapplied_slashes() -> Vec<(SessionIndex, CandidateHash, slashing::LegacyPendingSlashes)>; UnappliedSlashes;
	fn request_unapplied_slashes_v2() -> Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>; UnappliedSlashesV2;
	fn request_key_ownership_proof(validator_id: ValidatorId) -> Option<slashing::OpaqueKeyOwnershipProof>; KeyOwnershipProof;
	fn request_submit_report_dispute_lost(dp: slashing::DisputeProof, okop: slashing::OpaqueKeyOwnershipProof) -> Option<()>; SubmitReportDisputeLost;
	fn request_disabled_validators() -> Vec<ValidatorIndex>; DisabledValidators;
	fn request_async_backing_params() -> AsyncBackingParams; AsyncBackingParams;
	fn request_claim_queue() -> BTreeMap<CoreIndex, VecDeque<ParaId>>; ClaimQueue;
	fn request_para_backing_state(para_id: ParaId) -> Option<BackingState>; ParaBackingState;
	fn request_backing_constraints(para_id: ParaId) -> Option<Constraints>; BackingConstraints;
	fn request_min_backing_votes(session_index: SessionIndex) -> u32; MinimumBackingVotes;
	fn request_node_features(session_index: SessionIndex) -> NodeFeatures; NodeFeatures;
	fn request_para_ids(session_index: SessionIndex) -> Vec<ParaId>; ParaIds;
}
/// Requests executor parameters from the runtime effective at given relay-parent. First obtains
/// session index at the relay-parent, relying on the fact that it should be cached by the runtime
/// API caching layer even if the block itself has already been pruned. Then requests executor
/// parameters by session index.
/// Returns an error if failed to communicate to the runtime, or the parameters are not in the
/// storage, which should never happen.
/// Returns default execution parameters if the runtime doesn't yet support `SessionExecutorParams`
/// API call.
/// Otherwise, returns execution parameters returned by the runtime.
pub async fn executor_params_at_relay_parent(
relay_parent: Hash,
sender: &mut impl overseer::SubsystemSender<RuntimeApiMessage>,
) -> Result<ExecutorParams, Error> {
match request_session_index_for_child(relay_parent, sender).await.await {
Err(err) => {
// Failed to communicate with the runtime
Err(Error::Oneshot(err))
},
Ok(Err(err)) => {
// Runtime has failed to obtain a session index at the relay-parent.
Err(Error::RuntimeApi(err))
},
Ok(Ok(session_index)) => {
match request_session_executor_params(relay_parent, session_index, sender).await.await {
Err(err) => {
// Failed to communicate with the runtime
Err(Error::Oneshot(err))
},
Ok(Err(RuntimeApiError::NotSupported { .. })) => {
// Runtime doesn't yet support the api requested, should execute anyway
// with default set of parameters
Ok(ExecutorParams::default())
},
Ok(Err(err)) => {
// Runtime failed to execute the request
Err(Error::RuntimeApi(err))
},
Ok(Ok(None)) => {
// Storage doesn't contain a parameter set for the given session; should
// never happen
Err(Error::DataNotAvailable)
},
Ok(Ok(Some(executor_params))) => Ok(executor_params),
}
},
}
}
/// From the given set of validators, find the first key we can sign with, if any.
pub fn signing_key<'a>(
	validators: impl IntoIterator<Item = &'a ValidatorId>,
	keystore: &KeystorePtr,
) -> Option<ValidatorId> {
	// Same lookup as `signing_key_and_index`, discarding the index.
	let (key, _index) = signing_key_and_index(validators, keystore)?;
	Some(key)
}
/// From the given set of validators, find the first key we can sign with, if any, and return it
/// along with the validator index.
pub fn signing_key_and_index<'a>(
	validators: impl IntoIterator<Item = &'a ValidatorId>,
	keystore: &KeystorePtr,
) -> Option<(ValidatorId, ValidatorIndex)> {
	validators.into_iter().enumerate().find_map(|(position, validator)| {
		keystore
			.has_keys(&[(validator.to_raw_vec(), ValidatorId::ID)])
			.then(|| (validator.clone(), ValidatorIndex(position as _)))
	})
}
/// Sign the given data with the given validator ID.
///
/// Returns `Ok(None)` if the private key that corresponds to that validator ID is not found in the
/// given keystore. Returns an error if the key could not be used for signing.
pub fn sign(
	keystore: &KeystorePtr,
	key: &ValidatorId,
	data: &[u8],
) -> Result<Option<ValidatorSignature>, KeystoreError> {
	let maybe_signature = keystore.sr25519_sign(ValidatorId::ID, key.as_ref(), data)?;
	Ok(maybe_signature.map(Into::into))
}
/// Find the validator group the given validator index belongs to.
pub fn find_validator_group(
	groups: &[Vec<ValidatorIndex>],
	index: ValidatorIndex,
) -> Option<GroupIndex> {
	// Position of the first (and only expected) group containing the index.
	let position = groups.iter().position(|group| group.contains(&index))?;
	Some(GroupIndex(position as _))
}
/// Choose a random subset of `min` elements.
/// But always include `is_priority` elements.
pub fn choose_random_subset<T, F: FnMut(&T) -> bool>(is_priority: F, v: &mut Vec<T>, min: usize) {
	// Delegate to the RNG-parameterized variant using the thread-local generator.
	let mut rng = rand::thread_rng();
	choose_random_subset_with_rng(is_priority, v, &mut rng, min)
}
/// Choose a random subset of `min` elements using a specific Random Generator `Rng`
/// But always include `is_priority` elements.
pub fn choose_random_subset_with_rng<T, F: FnMut(&T) -> bool, R: rand::Rng>(
	is_priority: F,
	v: &mut Vec<T>,
	rng: &mut R,
	min: usize,
) {
	use rand::seq::SliceRandom as _;

	// Move priority elements to the front; `split` is the index of the first
	// non-priority element.
	let split = itertools::partition(v.iter_mut(), is_priority);

	if split < min && split < v.len() {
		// Priority elements alone don't reach `min`: top up with a random pick of the rest.
		v[split..].shuffle(rng);
		v.truncate(min);
	} else {
		// Either the priority elements already cover `min`, or there is nothing else to pick:
		// keep exactly the priority elements.
		v.truncate(split);
	}
}
/// Returns a `bool` with a probability of `a / b` of being true.
pub fn gen_ratio(a: usize, b: usize) -> bool {
	// Delegate to the RNG-parameterized variant using the thread-local generator.
	let mut rng = rand::thread_rng();
	gen_ratio_rng(a, b, &mut rng)
}
/// Returns a `bool` with a probability of `a / b` of being true.
pub fn gen_ratio_rng<R: rand::Rng>(a: usize, b: usize, rng: &mut R) -> bool {
	// NOTE(review): the `as u32` casts silently truncate if `a` or `b` exceed
	// `u32::MAX` on 64-bit targets — presumably callers only pass small ratios; confirm.
	rng.gen_ratio(a as u32, b as u32)
}
/// Local validator information
///
/// It can be created if the local node is a validator in the context of a particular
/// relay chain block.
#[derive(Debug)]
pub struct Validator {
	// Signing context (session index + relay-parent hash) for this block.
	signing_context: SigningContext,
	// The local validator key found in the keystore.
	key: ValidatorId,
	// Position of `key` within the session's validator set.
	index: ValidatorIndex,
	// Whether `index` appears in the on-chain disabled-validators list.
	disabled: bool,
}
impl Validator {
	/// Get a struct representing this node's validator if this node is in fact a validator in the
	/// context of the given block.
	pub async fn new<S>(parent: Hash, keystore: KeystorePtr, sender: &mut S) -> Result<Self, Error>
	where
		S: SubsystemSender<RuntimeApiMessage>,
	{
		// Note: request_validators, request_disabled_validators and request_session_index_for_child
		// do not and cannot run concurrently: all three take a mutable handle to the same sender.
		// However, each of them returns a oneshot::Receiver, and those are resolved concurrently.
		let (validators, disabled_validators, session_index) = futures::try_join!(
			request_validators(parent, sender).await,
			request_disabled_validators(parent, sender).await,
			request_session_index_for_child(parent, sender).await,
		)?;
		// Each joined value is itself a `Result` from the runtime API call; the `?`s
		// below surface those inner errors.
		let signing_context = SigningContext { session_index: session_index?, parent_hash: parent };
		let validators = validators?;
		let disabled_validators = disabled_validators?;
		Self::construct(&validators, &disabled_validators, signing_context, keystore)
	}

	/// Construct a validator instance without performing runtime fetches.
	///
	/// This can be useful if external code also needs the same data.
	///
	/// Fails with [`Error::NotAValidator`] if none of `validators` has a signing key in
	/// the given keystore.
	pub fn construct(
		validators: &[ValidatorId],
		disabled_validators: &[ValidatorIndex],
		signing_context: SigningContext,
		keystore: KeystorePtr,
	) -> Result<Self, Error> {
		let (key, index) =
			signing_key_and_index(validators, &keystore).ok_or(Error::NotAValidator)?;
		// Record whether our index is in the on-chain disabled set.
		let disabled = disabled_validators.iter().any(|d: &ValidatorIndex| *d == index);
		Ok(Validator { signing_context, key, index, disabled })
	}

	/// Get this validator's id.
	pub fn id(&self) -> ValidatorId {
		self.key.clone()
	}

	/// Get this validator's local index.
	pub fn index(&self) -> ValidatorIndex {
		self.index
	}

	/// Get the enabled/disabled state of this validator
	pub fn disabled(&self) -> bool {
		self.disabled
	}

	/// Get the current signing context.
	pub fn signing_context(&self) -> &SigningContext {
		&self.signing_context
	}

	/// Sign a payload with this validator
	// NOTE(review): presumably returns `Ok(None)` when the key is missing from the
	// keystore, mirroring the free `sign` function above — confirm against `Signed::sign`.
	pub fn sign<Payload: EncodeAs<RealPayload>, RealPayload: Encode>(
		&self,
		keystore: KeystorePtr,
		payload: Payload,
	) -> Result<Option<Signed<Payload, RealPayload>>, KeystoreError> {
		Signed::sign(&keystore, payload, &self.signing_context, self.index, &self.key)
	}
}
@@ -0,0 +1,208 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! ## Background
//!
//! Writing concurrent and even multithreaded code by default is inconvenient and slow: no references,
//! hence lots of needless cloning and data duplication, locks, mutexes, ... We should reach
//! for concurrency and parallelism when there is an actual need, not just because we can and it is
//! reasonably safe in Rust.
//!
//! I very much agree with many points in this blog post for example:
//!
//! <https://maciej.codes/2022-06-09-local-async.html>
//!
//! Another very good post by Pierre (Tomaka):
//!
//! <https://tomaka.medium.com/a-look-back-at-asynchronous-rust-d54d63934a1c>
//!
//! ## Architecture
//!
//! This module helps with this in part. It does not break the multithreaded by default approach,
//! but it breaks the `spawn everything` approach. So once you `spawn` you will still be
//! multithreaded by default, despite that for most tasks we spawn (which just wait for network or
//! some message to arrive), that is very much pointless and needless overhead. You will just spawn
//! less in the first place.
//!
//! By default your code is single threaded, except when actually needed:
//! - need to wait for long running synchronous IO (a threaded runtime is actually useful here)
//! - need to wait for some async event (message to arrive)
//! - need to do some hefty CPU bound processing (a thread is required here as well)
//!
//! and it is not acceptable to block the main task for waiting for the result, because we actually
//! really have other things to do or at least need to stay responsive just in case.
//!
//! With the types and traits in this module you can achieve exactly that: You write modules which
//! just execute logic and can call into the functions of other modules - yes we are calling normal
//! functions. For the case a module you are calling into requires an occasional background task,
//! you provide it with a `NestingSender<M, ChildModuleMessage>` that it can pass to any spawned
//! tasks.
//!
//! This way you don't have to spawn a task for each module just for it to be able to handle
//! asynchronous events. The module relies on the using/enclosing code/module to forward it any
//! asynchronous messages in a structured way.
//!
//! What makes this architecture nice is the separation of concerns - at the top you only have to
//! provide a sender and dispatch received messages to the root module - it is completely
//! irrelevant how complex that module is, it might consist of child modules also having the need
//! to spawn and receive messages, which in turn do the same, still the root logic stays unchanged.
//! Everything is isolated to the level where it belongs, while we still keep a single task scope
//! in all non blocking/not CPU intensive parts, which allows us to share data via references for
//! example.
//!
//! Because the wrapping is optional and transparent to the lower modules, each module can also be
//! used at the top directly without any wrapping, e.g. for standalone use or for testing purposes.
//!
//! Checkout the documentation of [`NestingSender`][nesting_sender::NestingSender] below for a basic
//! usage example. For a real world usage I would like to point you to the dispute-distribution
//! subsystem which makes use of this architecture.
//!
//! ## Limitations
//!
//! Nothing is ever for free of course: Each level adds an indirect function call to message
//! sending. which should be cheap enough for most applications, but something to keep in mind. In
//! particular we avoided the use of async traits, which would have required memory allocations
//! on each send. Also cloning of [`NestingSender`][nesting_sender::NestingSender] is more
//! expensive than cloning a plain mpsc::Sender, the overhead should be negligible though.
//!
//! Further limitations: Because everything is routed to the same channel, it is not possible with
//! this approach to put back pressure on only a single source (as all are the same). If a module
//! has a task that requires this, it indeed has to spawn a long running task which can do the
//! back-pressure on that message source or we make it its own subsystem. This is just one of the
//! situations that justifies the complexity of asynchrony.
use std::{convert::identity, sync::Arc};
use futures::{channel::mpsc, SinkExt};
/// A message sender that supports sending nested messages.
///
/// This sender wraps an `mpsc::Sender` and a conversion function for converting given messages of
/// type `Mnested` to the message type actually supported by the mpsc (`M`).
///
/// Example:
///
/// ```rust
/// # use pezkuwi_node_subsystem_util::nesting_sender::NestingSender;
///
/// enum RootMessage {
/// Child1Message(ChildMessage),
/// Child2Message(OtherChildMessage),
/// SomeOwnMessage,
/// }
///
/// enum ChildMessage {
/// TaskFinished(u32),
/// }
///
/// enum OtherChildMessage {
/// QueryResult(bool),
/// }
///
/// // We would then pass in a `NestingSender` to our child module of the following type:
/// type ChildSender = NestingSender<RootMessage, ChildMessage>;
///
/// // Types in the child module can (and should) be generic over the root type:
/// struct ChildState<M> {
/// tx: NestingSender<M, ChildMessage>,
/// }
///
///
/// // Create the root message sender:
///
/// let (root_sender, receiver) = NestingSender::new_root(1);
/// // Get a sender for the child module based on that root sender:
/// let child_sender = NestingSender::new(root_sender.clone(), RootMessage::Child1Message);
/// // pass `child_sender` to child module ...
/// ```
///
/// `ChildMessage` could itself have a constructor with messages of a child of its own and can use
/// `NestingSender::new` with its own sender and a conversion function to provide a further nested
/// sender, suitable for the child module.
pub struct NestingSender<M, Mnested> {
	// The underlying channel, carrying the root message type `M`.
	sender: mpsc::Sender<M>,
	// Converts this sender's message type `Mnested` into the root type `M`. Shared in an
	// `Arc` so cloning the sender is cheap.
	conversion: Arc<dyn Fn(Mnested) -> M + 'static + Send + Sync>,
}
impl<M> NestingSender<M, M>
where
	M: 'static,
{
	/// Create a new "root" sender.
	///
	/// A root sender passes messages to the internal mpsc unchanged (identity
	/// conversion).
	///
	/// Params: The channel size of the created mpsc.
	/// Returns: The newly constructed `NestingSender` and the corresponding mpsc receiver.
	pub fn new_root(channel_size: usize) -> (Self, mpsc::Receiver<M>) {
		let (sender, receiver) = mpsc::channel(channel_size);
		let root = Self { sender, conversion: Arc::new(identity) };
		(root, receiver)
	}
}
impl<M, Mnested> NestingSender<M, Mnested>
where
	M: 'static,
	Mnested: 'static,
{
	/// Create a new `NestingSender` which wraps a given "parent" sender.
	///
	/// By passing in a necessary conversion from `Mnested` to `Mparent` (the `Mnested` of the
	/// parent sender), we can construct a derived `NestingSender<M, Mnested>` from a
	/// `NestingSender<M, Mparent>`.
	///
	/// Resulting sender does the following conversion:
	///
	/// ```text
	/// Mnested -> Mparent -> M
	/// Inputs:
	///   F(Mparent) -> M (via parent)
	///   F(Mnested) -> Mparent (via child_conversion)
	/// Result: F(Mnested) -> M
	/// ```
	pub fn new<Mparent>(
		parent: NestingSender<M, Mparent>,
		child_conversion: fn(Mnested) -> Mparent,
	) -> Self
	where
		Mparent: 'static,
	{
		// Compose the child's conversion with the parent's, reusing the same channel.
		let parent_conversion = parent.conversion;
		let compose = move |nested: Mnested| parent_conversion(child_conversion(nested));
		Self { sender: parent.sender, conversion: Arc::new(compose) }
	}

	/// Send a message via the underlying mpsc.
	///
	/// The necessary conversion to the root message type is applied first.
	pub async fn send_message(&mut self, m: Mnested) -> Result<(), mpsc::SendError> {
		let converted = (self.conversion)(m);
		// `feed` rather than `send`: flushing an mpsc means waiting for the receiver to
		// pick the data up, and we don't want to wait for that.
		self.sender.feed(converted).await
	}
}
// Helper traits and implementations:

// `Clone` is implemented by hand: a derive would add `M: Clone` / `Mnested: Clone`
// bounds, which are unnecessary — only the channel handle and the shared conversion
// `Arc` need cloning.
impl<M, Mnested> Clone for NestingSender<M, Mnested>
where
	M: 'static,
	Mnested: 'static,
{
	fn clone(&self) -> Self {
		Self { sender: self.sender.clone(), conversion: self.conversion.clone() }
	}
}
@@ -0,0 +1,118 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! A utility abstraction to collect and send reputation changes.
use pezkuwi_node_network_protocol::{PeerId, UnifiedReputationChange};
use pezkuwi_node_subsystem::{
messages::{NetworkBridgeTxMessage, ReportPeerMessage},
overseer,
};
use std::{collections::HashMap, time::Duration};
/// Default delay for sending reputation changes
pub const REPUTATION_CHANGE_INTERVAL: Duration = Duration::from_secs(30);

/// Log target for tracing output of this module.
// `'static` is implied for `&str` constants, so the explicit lifetime is redundant
// (clippy::redundant_static_lifetimes).
const LOG_TARGET: &str = "teyrchain::reputation-aggregator";
// Accumulated reputation deltas keyed by peer; each `i32` is the saturating sum of the
// `cost_or_benefit()` values recorded for that peer since the last flush.
type BatchReputationChange = HashMap<PeerId, i32>;
/// Collects reputation changes and sends them in one batch to relieve network channels
#[derive(Debug, Clone)]
pub struct ReputationAggregator {
	// Predicate deciding whether a change must bypass batching and be sent right away.
	send_immediately_if: fn(UnifiedReputationChange) -> bool,
	// Pending batched changes; `None` when nothing has been recorded since the last flush.
	by_peer: Option<BatchReputationChange>,
}
impl Default for ReputationAggregator {
fn default() -> Self {
Self::new(|rep| matches!(rep, UnifiedReputationChange::Malicious(_)))
}
}
impl ReputationAggregator {
	/// Create a new `ReputationAggregator`.
	///
	/// # Arguments
	///
	/// * `send_immediately_if` - a predicate over `UnifiedReputationChange`; when it
	///   returns `true` the change is reported right away instead of being batched.
	///   By default it is used to send `UnifiedReputationChange::Malicious` changes
	///   immediately, and it is also convenient for testing.
	pub fn new(send_immediately_if: fn(UnifiedReputationChange) -> bool) -> Self {
		Self { send_immediately_if, by_peer: None }
	}

	/// Sends collected reputation changes in a batch,
	/// removing them from inner state
	pub async fn send(
		&mut self,
		sender: &mut impl overseer::SubsystemSender<NetworkBridgeTxMessage>,
	) {
		// `take` leaves `None` behind so the next `add` starts a fresh batch.
		let batch = self.by_peer.take();
		if let Some(by_peer) = batch {
			let message = NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Batch(by_peer));
			sender.send_message(message).await;
		}
	}

	/// Adds reputation change to inner state
	/// or sends it right away if the change is dangerous
	pub async fn modify(
		&mut self,
		sender: &mut impl overseer::SubsystemSender<NetworkBridgeTxMessage>,
		peer_id: PeerId,
		rep: UnifiedReputationChange,
	) {
		if rep.cost_or_benefit() < 0 {
			gum::debug!(target: LOG_TARGET, peer = ?peer_id, ?rep, "Reduce reputation");
		}

		if (self.send_immediately_if)(rep) {
			self.single_send(sender, peer_id, rep).await;
		} else {
			self.add(peer_id, rep);
		}
	}

	// Report one change immediately, bypassing the batch.
	async fn single_send(
		&self,
		sender: &mut impl overseer::SubsystemSender<NetworkBridgeTxMessage>,
		peer_id: PeerId,
		rep: UnifiedReputationChange,
	) {
		let message =
			NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(peer_id, rep.into()));
		sender.send_message(message).await;
	}

	// Fold one change into the pending batch, creating the batch map on first use.
	fn add(&mut self, peer_id: PeerId, rep: UnifiedReputationChange) {
		let by_peer = self.by_peer.get_or_insert_with(HashMap::new);
		add_reputation(by_peer, peer_id, rep)
	}
}
/// Add a reputation change to an existing collection.
pub fn add_reputation(
	acc: &mut BatchReputationChange,
	peer_id: PeerId,
	rep: UnifiedReputationChange,
) {
	let delta = rep.cost_or_benefit();
	// Saturating addition so repeated changes cannot overflow the accumulator.
	let entry = acc.entry(peer_id).or_insert(0);
	*entry = entry.saturating_add(delta);
}
@@ -0,0 +1,58 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//
//! Error handling related code and Error/Result definitions.
use futures::channel::oneshot;
use pezkuwi_node_subsystem::errors::RuntimeApiError;
use pezkuwi_primitives::SessionIndex;
#[allow(missing_docs)]
// `splitable` additionally generates `FatalError` and `JfyiError` views of this enum,
// split by the `#[fatal]` marker (see `recv_runtime` below for their usage).
#[fatality::fatality(splitable)]
pub enum Error {
	/// Runtime API subsystem is down, which means we're shutting down.
	#[fatal]
	#[error("Runtime request got canceled")]
	RuntimeRequestCanceled(#[from] oneshot::Canceled),
	/// Some request to the runtime failed.
	/// For example if we prune a block we're requesting info about.
	#[error("Runtime API error {0}")]
	RuntimeRequest(#[from] RuntimeApiError),
	/// We tried fetching a session info which was not available.
	#[error("There was no session with the given index {0}")]
	NoSuchSession(SessionIndex),
	/// We tried fetching executor params for a session which were not available.
	#[error("There was no executor parameters for session with the given index {0}")]
	NoExecutorParams(SessionIndex),
}
/// Convenience alias: results in this module always fail with [`Error`].
pub type Result<T> = std::result::Result<T, Error>;
/// Receive a response from a runtime request and convert errors.
pub async fn recv_runtime<V>(
	r: oneshot::Receiver<std::result::Result<V, RuntimeApiError>>,
) -> Result<V> {
	// A canceled oneshot means the runtime API subsystem is gone — fatal.
	let response = r.await.map_err(FatalError::RuntimeRequestCanceled)?;
	// A failed request itself is recoverable (e.g. the queried block was pruned).
	let value = response.map_err(JfyiError::RuntimeRequest)?;
	Ok(value)
}
@@ -0,0 +1,713 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Convenient interface to runtime information.
use pezkuwi_node_primitives::MAX_FINALITY_LAG;
use schnellru::{ByLength, LruMap};
use codec::Encode;
use sp_application_crypto::AppCrypto;
use sp_core::crypto::ByteArray;
use sp_keystore::{Keystore, KeystorePtr};
use pezkuwi_node_subsystem::{
errors::RuntimeApiError,
messages::{RuntimeApiMessage, RuntimeApiRequest},
overseer, SubsystemSender,
};
use pezkuwi_node_subsystem_types::UnpinHandle;
use pezkuwi_primitives::{
node_features::FeatureIndex, slashing, CandidateEvent, CandidateHash, CoreIndex, CoreState,
EncodeAs, ExecutorParams, GroupIndex, GroupRotationInfo, Hash, Id as ParaId, IndexedVec,
NodeFeatures, OccupiedCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed,
SigningContext, UncheckedSigned, ValidationCode, ValidationCodeHash, ValidatorId,
ValidatorIndex, DEFAULT_SCHEDULING_LOOKAHEAD,
};
use std::collections::{BTreeMap, VecDeque};
use crate::{
request_availability_cores, request_candidate_events, request_claim_queue,
request_disabled_validators, request_from_runtime, request_key_ownership_proof,
request_node_features, request_on_chain_votes, request_session_executor_params,
request_session_index_for_child, request_session_info, request_submit_report_dispute_lost,
request_unapplied_slashes, request_unapplied_slashes_v2, request_validation_code_by_hash,
request_validator_groups,
};
/// Errors that can happen on runtime fetches.
mod error;
use error::Result;
pub use error::{recv_runtime, Error, FatalError, JfyiError};
/// Log target for this module.
// `&str` in a const is already `'static`; the explicit lifetime was redundant
// (clippy::redundant_static_lifetimes).
const LOG_TARGET: &str = "teyrchain::runtime-info";
/// Configuration for constructing a `RuntimeInfo`.
pub struct Config {
	/// Needed for retrieval of `ValidatorInfo`.
	///
	/// Pass `None` if you are not interested.
	pub keystore: Option<KeystorePtr>,
	/// How many sessions should we keep in the cache?
	///
	/// The `Default` impl sets this to 2 (current and previous session).
	pub session_cache_lru_size: u32,
}
/// Caching of session info.
///
/// It should be ensured that a cached session stays live in the cache as long as we might need it.
pub struct RuntimeInfo {
	/// Get the session index for a given relay parent.
	///
	/// We query this up to a 100 times per block, so caching it here without roundtrips over the
	/// overseer seems sensible.
	session_index_cache: LruMap<Hash, SessionIndex>,
	/// In the happy case, we do not query disabled validators at all. In the worst case, we can
	/// query it order of `n_cores` times `n_validators` per block, so caching it here seems
	/// sensible.
	disabled_validators_cache: LruMap<Hash, Vec<ValidatorIndex>>,
	/// Look up cached sessions by `SessionIndex`.
	session_info_cache: LruMap<SessionIndex, ExtendedSessionInfo>,
	/// Unpin handle of *some* block in the session.
	/// Only blocks pinned explicitly by `pin_block` are stored here.
	/// Dropping an entry on LRU eviction releases the handle and thereby unpins the block.
	pinned_blocks: LruMap<SessionIndex, UnpinHandle>,
	/// Key store for determining whether we are a validator and what `ValidatorIndex` we have.
	keystore: Option<KeystorePtr>,
}
/// `SessionInfo` with additional useful data for validator nodes.
///
/// Assembled and cached once per session by [`RuntimeInfo::get_session_info_by_index`].
pub struct ExtendedSessionInfo {
	/// Actual session info as fetched from the runtime.
	pub session_info: SessionInfo,
	/// Contains useful information about ourselves, in case this node is a validator.
	pub validator_info: ValidatorInfo,
	/// Session executor parameters.
	pub executor_params: ExecutorParams,
	/// Node features enabled by the runtime for this session.
	pub node_features: NodeFeatures,
}
/// Information about ourselves, in case we are an `Authority`.
///
/// This data is derived from the `SessionInfo` and our key as found in the keystore.
/// Both fields are `None` when this node is not a validator in the session.
pub struct ValidatorInfo {
	/// The index this very validator has in `SessionInfo` vectors, if any.
	pub our_index: Option<ValidatorIndex>,
	/// The group we belong to, if any.
	pub our_group: Option<GroupIndex>,
}
impl Default for Config {
fn default() -> Self {
Self {
keystore: None,
// Usually we need to cache the current and the last session.
session_cache_lru_size: 2,
}
}
}
impl RuntimeInfo {
	/// Create a new `RuntimeInfo` for convenient runtime fetches.
	pub fn new(keystore: Option<KeystorePtr>) -> Self {
		Self::new_with_config(Config { keystore, ..Default::default() })
	}
	/// Create with more elaborate configuration options.
	pub fn new_with_config(cfg: Config) -> Self {
		Self {
			// Usually messages are processed for blocks pointing to hashes from last finalized
			// block to best, so make this cache large enough to hold at least this amount of
			// hashes, so that we get the benefit of caching even when finality lag is large.
			session_index_cache: LruMap::new(ByLength::new(
				cfg.session_cache_lru_size.max(2 * MAX_FINALITY_LAG),
			)),
			session_info_cache: LruMap::new(ByLength::new(cfg.session_cache_lru_size)),
			// NOTE(review): 100 entries looks like a generous bound on the number of relay
			// parents queried concurrently — confirm against callers.
			disabled_validators_cache: LruMap::new(ByLength::new(100)),
			pinned_blocks: LruMap::new(ByLength::new(cfg.session_cache_lru_size)),
			keystore: cfg.keystore,
		}
	}
	/// Returns the session index expected at any child of the `parent` block.
	/// This does not return the session index for the `parent` block.
	pub async fn get_session_index_for_child<Sender>(
		&mut self,
		sender: &mut Sender,
		parent: Hash,
	) -> Result<SessionIndex>
	where
		Sender: SubsystemSender<RuntimeApiMessage>,
	{
		match self.session_index_cache.get(&parent) {
			Some(index) => Ok(*index),
			None => {
				// Cache miss: one runtime API roundtrip, then remember the result.
				let index =
					recv_runtime(request_session_index_for_child(parent, sender).await).await?;
				self.session_index_cache.insert(parent, index);
				Ok(index)
			},
		}
	}
	/// Pin a given block in the given session if none are pinned in that session.
	/// Unpinning will happen automatically when LRU cache grows over the limit.
	pub fn pin_block(&mut self, session_index: SessionIndex, unpin_handle: UnpinHandle) {
		// `get_or_insert` only stores (and thus keeps alive) the handle if the session
		// had no pinned block yet; an existing pin is left untouched.
		self.pinned_blocks.get_or_insert(session_index, || unpin_handle);
	}
	/// Get the hash of a pinned block for the given session index, if any.
	pub fn get_block_in_session(&self, session_index: SessionIndex) -> Option<Hash> {
		// `peek` avoids promoting the entry in LRU order for a read-only lookup.
		self.pinned_blocks.peek(&session_index).map(|h| h.hash())
	}
	/// Get `ExtendedSessionInfo` by relay parent hash.
	pub async fn get_session_info<'a, Sender>(
		&'a mut self,
		sender: &mut Sender,
		relay_parent: Hash,
	) -> Result<&'a ExtendedSessionInfo>
	where
		Sender: SubsystemSender<RuntimeApiMessage>,
	{
		let session_index = self.get_session_index_for_child(sender, relay_parent).await?;
		self.get_session_info_by_index(sender, relay_parent, session_index).await
	}
	/// Get the list of disabled validators at the relay parent.
	pub async fn get_disabled_validators<Sender>(
		&mut self,
		sender: &mut Sender,
		relay_parent: Hash,
	) -> Result<Vec<ValidatorIndex>>
	where
		Sender: SubsystemSender<RuntimeApiMessage>,
	{
		match self.disabled_validators_cache.get(&relay_parent).cloned() {
			Some(result) => Ok(result),
			None => {
				// Cache miss: fetch from the runtime and populate the per-relay-parent cache.
				let disabled_validators =
					request_disabled_validators(relay_parent, sender).await.await??;
				self.disabled_validators_cache.insert(relay_parent, disabled_validators.clone());
				Ok(disabled_validators)
			},
		}
	}
	/// Get `ExtendedSessionInfo` by session index.
	///
	/// `request_session_info` still requires the parent to be passed in, so we take the parent
	/// in addition to the `SessionIndex`.
	pub async fn get_session_info_by_index<'a, Sender>(
		&'a mut self,
		sender: &mut Sender,
		parent: Hash,
		session_index: SessionIndex,
	) -> Result<&'a ExtendedSessionInfo>
	where
		Sender: SubsystemSender<RuntimeApiMessage>,
	{
		if self.session_info_cache.get(&session_index).is_none() {
			// Populate the cache: session info, executor params, our validator identity and
			// the node features are fetched/derived once and kept for the whole session.
			let session_info =
				recv_runtime(request_session_info(parent, session_index, sender).await)
					.await?
					.ok_or(JfyiError::NoSuchSession(session_index))?;
			let executor_params =
				recv_runtime(request_session_executor_params(parent, session_index, sender).await)
					.await?
					.ok_or(JfyiError::NoExecutorParams(session_index))?;
			let validator_info = self.get_validator_info(&session_info)?;
			let node_features =
				request_node_features(parent, session_index, sender).await.await??;
			// Warn if the runtime enabled a feature bit beyond the range this node knows about.
			let last_set_index = node_features.iter_ones().last().unwrap_or_default();
			if last_set_index >= FeatureIndex::FirstUnassigned as usize {
				gum::warn!(target: LOG_TARGET, "Runtime requires feature bit {} that node doesn't support, please upgrade node version", last_set_index);
			}
			let full_info = ExtendedSessionInfo {
				session_info,
				validator_info,
				executor_params,
				node_features,
			};
			self.session_info_cache.insert(session_index, full_info);
		}
		Ok(self
			.session_info_cache
			.get(&session_index)
			.expect("We just put the value there. qed."))
	}
	/// Convenience function for checking the signature of something signed.
	pub async fn check_signature<Sender, Payload, RealPayload>(
		&mut self,
		sender: &mut Sender,
		relay_parent: Hash,
		signed: UncheckedSigned<Payload, RealPayload>,
	) -> Result<
		std::result::Result<Signed<Payload, RealPayload>, UncheckedSigned<Payload, RealPayload>>,
	>
	where
		Sender: SubsystemSender<RuntimeApiMessage>,
		Payload: EncodeAs<RealPayload> + Clone,
		RealPayload: Encode + Clone,
	{
		let session_index = self.get_session_index_for_child(sender, relay_parent).await?;
		let info = self.get_session_info_by_index(sender, relay_parent, session_index).await?;
		Ok(check_signature(session_index, &info.session_info, relay_parent, signed))
	}
	/// Build `ValidatorInfo` for the current session.
	///
	/// Returns a `ValidatorInfo` with both fields `None` if we are not a teyrchain validator.
	fn get_validator_info(&self, session_info: &SessionInfo) -> Result<ValidatorInfo> {
		if let Some(our_index) = self.get_our_index(&session_info.validators) {
			// Get our group index:
			let our_group =
				session_info.validator_groups.iter().enumerate().find_map(|(i, g)| {
					g.iter().find_map(|v| {
						if *v == our_index {
							Some(GroupIndex(i as u32))
						} else {
							None
						}
					})
				});
			let info = ValidatorInfo { our_index: Some(our_index), our_group };
			return Ok(info);
		}
		return Ok(ValidatorInfo { our_index: None, our_group: None });
	}
	/// Get our `ValidatorIndex`.
	///
	/// Returns: None if we are not a validator.
	fn get_our_index(
		&self,
		validators: &IndexedVec<ValidatorIndex, ValidatorId>,
	) -> Option<ValidatorIndex> {
		// No keystore configured means this node never acts as a validator.
		let keystore = self.keystore.as_ref()?;
		for (i, v) in validators.iter().enumerate() {
			if Keystore::has_keys(&**keystore, &[(v.to_raw_vec(), ValidatorId::ID)]) {
				return Some(ValidatorIndex(i as u32));
			}
		}
		None
	}
}
/// Convenience function for quickly checking the signature on signed data.
///
/// Returns the checked variant on success; hands the unchecked input back on failure
/// (unknown validator index or invalid signature).
pub fn check_signature<Payload, RealPayload>(
	session_index: SessionIndex,
	session_info: &SessionInfo,
	relay_parent: Hash,
	signed: UncheckedSigned<Payload, RealPayload>,
) -> std::result::Result<Signed<Payload, RealPayload>, UncheckedSigned<Payload, RealPayload>>
where
	Payload: EncodeAs<RealPayload> + Clone,
	RealPayload: Encode + Clone,
{
	let signing_context = SigningContext { session_index, parent_hash: relay_parent };
	// Using a `match` lets us move `signed` into whichever arm needs it, avoiding the
	// payload clone the previous `ok_or_else(|| signed.clone())` formulation performed
	// on the unknown-validator path.
	match session_info.validators.get(signed.unchecked_validator_index()) {
		Some(validator) => signed.try_into_checked(&signing_context, validator),
		None => Err(signed),
	}
}
/// Request availability cores from the runtime.
pub async fn get_availability_cores<Sender>(
	sender: &mut Sender,
	relay_parent: Hash,
) -> Result<Vec<CoreState>>
where
	Sender: overseer::SubsystemSender<RuntimeApiMessage>,
{
	let receiver = request_availability_cores(relay_parent, sender).await;
	recv_runtime(receiver).await
}
/// Variant of `request_availability_cores` that only returns occupied ones.
pub async fn get_occupied_cores<Sender>(
	sender: &mut Sender,
	relay_parent: Hash,
) -> Result<Vec<(CoreIndex, OccupiedCore)>>
where
	Sender: overseer::SubsystemSender<RuntimeApiMessage>,
{
	let cores = get_availability_cores(sender, relay_parent).await?;
	// A core's position in the returned vector is its `CoreIndex`.
	let mut occupied = Vec::new();
	for (idx, core_state) in cores.into_iter().enumerate() {
		if let CoreState::Occupied(occupied_core) = core_state {
			occupied.push((CoreIndex(idx as u32), occupied_core));
		}
	}
	Ok(occupied)
}
/// Get group rotation info based on the given `relay_parent`.
pub async fn get_group_rotation_info<Sender>(
	sender: &mut Sender,
	relay_parent: Hash,
) -> Result<GroupRotationInfo>
where
	Sender: overseer::SubsystemSender<RuntimeApiMessage>,
{
	// The runtime API hands back the validator groups alongside the rotation info; we
	// discard the groups here because of `RuntimeInfo`. Ideally we would not fetch them
	// in the first place.
	let (_groups, rotation_info) =
		recv_runtime(request_validator_groups(relay_parent, sender).await).await?;
	Ok(rotation_info)
}
/// Get `CandidateEvent`s for the given `relay_parent`.
pub async fn get_candidate_events<Sender>(
	sender: &mut Sender,
	relay_parent: Hash,
) -> Result<Vec<CandidateEvent>>
where
	Sender: SubsystemSender<RuntimeApiMessage>,
{
	let receiver = request_candidate_events(relay_parent, sender).await;
	recv_runtime(receiver).await
}
/// Get on chain votes.
pub async fn get_on_chain_votes<Sender>(
	sender: &mut Sender,
	relay_parent: Hash,
) -> Result<Option<ScrapedOnChainVotes>>
where
	Sender: SubsystemSender<RuntimeApiMessage>,
{
	let receiver = request_on_chain_votes(relay_parent, sender).await;
	recv_runtime(receiver).await
}
/// Fetch `ValidationCode` by hash from the runtime.
pub async fn get_validation_code_by_hash<Sender>(
	sender: &mut Sender,
	relay_parent: Hash,
	validation_code_hash: ValidationCodeHash,
) -> Result<Option<ValidationCode>>
where
	Sender: SubsystemSender<RuntimeApiMessage>,
{
	let receiver =
		request_validation_code_by_hash(relay_parent, validation_code_hash, sender).await;
	recv_runtime(receiver).await
}
/// Fetch a list of `PendingSlashes` from the runtime.
/// Will fallback to `unapplied_slashes` if the runtime does not
/// support `unapplied_slashes_v2`.
pub async fn get_unapplied_slashes<Sender>(
	sender: &mut Sender,
	relay_parent: Hash,
) -> Result<Vec<(SessionIndex, CandidateHash, slashing::PendingSlashes)>>
where
	Sender: SubsystemSender<RuntimeApiMessage>,
{
	let v2_result = recv_runtime(request_unapplied_slashes_v2(relay_parent, sender).await).await;
	match v2_result {
		Err(Error::RuntimeRequest(RuntimeApiError::NotSupported { .. })) => {
			// Runtime is too old for v2: fetch the legacy slashes and upgrade each entry.
			let legacy =
				recv_runtime(request_unapplied_slashes(relay_parent, sender).await).await?;
			let converted = legacy
				.into_iter()
				.map(|(session, candidate_hash, legacy_slash)| {
					let pending = slashing::PendingSlashes {
						keys: legacy_slash.keys,
						kind: legacy_slash.kind.into(),
					};
					(session, candidate_hash, pending)
				})
				.collect();
			Ok(converted)
		},
		// Success, or any other error, is passed through unchanged.
		other => other,
	}
}
/// Generate validator key ownership proof.
///
/// Note: The choice of `relay_parent` is important here, it needs to match
/// the desired session index of the validator set in question.
pub async fn key_ownership_proof<Sender>(
	sender: &mut Sender,
	relay_parent: Hash,
	validator_id: ValidatorId,
) -> Result<Option<slashing::OpaqueKeyOwnershipProof>>
where
	Sender: SubsystemSender<RuntimeApiMessage>,
{
	let receiver = request_key_ownership_proof(relay_parent, validator_id, sender).await;
	recv_runtime(receiver).await
}
/// Submit a past-session dispute slashing report.
pub async fn submit_report_dispute_lost<Sender>(
	sender: &mut Sender,
	relay_parent: Hash,
	dispute_proof: slashing::DisputeProof,
	key_ownership_proof: slashing::OpaqueKeyOwnershipProof,
) -> Result<Option<()>>
where
	Sender: SubsystemSender<RuntimeApiMessage>,
{
	let receiver = request_submit_report_dispute_lost(
		relay_parent,
		dispute_proof,
		key_ownership_proof,
		sender,
	)
	.await;
	recv_runtime(receiver).await
}
/// A snapshot of the runtime claim queue at an arbitrary relay chain block.
///
/// Maps each core to the queue of paras claiming it; position 0 in a queue is the
/// claim for the very next block (see [`ClaimQueueSnapshot::get_claim_for`]).
#[derive(Default, Clone, Debug)]
pub struct ClaimQueueSnapshot(pub BTreeMap<CoreIndex, VecDeque<ParaId>>);
impl From<BTreeMap<CoreIndex, VecDeque<ParaId>>> for ClaimQueueSnapshot {
fn from(claim_queue_snapshot: BTreeMap<CoreIndex, VecDeque<ParaId>>) -> Self {
ClaimQueueSnapshot(claim_queue_snapshot)
}
}
impl ClaimQueueSnapshot {
	/// Returns the `ParaId` that has a claim for `core_index` at the specified `depth` in the
	/// claim queue. A depth of `0` means the very next block.
	pub fn get_claim_for(&self, core_index: CoreIndex, depth: usize) -> Option<ParaId> {
		self.0.get(&core_index).and_then(|queue| queue.get(depth)).copied()
	}
	/// Returns an iterator over all claimed cores and the claiming `ParaId` at the specified
	/// `depth` in the claim queue.
	pub fn iter_claims_at_depth(
		&self,
		depth: usize,
	) -> impl Iterator<Item = (CoreIndex, ParaId)> + '_ {
		self.0.iter().filter_map(move |(core_index, queue)| {
			queue.get(depth).map(|para| (*core_index, *para))
		})
	}
	/// Returns an iterator over all claims on the given core.
	pub fn iter_claims_for_core(
		&self,
		core_index: &CoreIndex,
	) -> impl Iterator<Item = &ParaId> + '_ {
		// An absent core behaves like an empty queue.
		self.0.get(core_index).into_iter().flat_map(|queue| queue.iter())
	}
	/// Returns an iterator over the whole claim queue.
	pub fn iter_all_claims(&self) -> impl Iterator<Item = (&CoreIndex, &VecDeque<ParaId>)> + '_ {
		self.0.iter()
	}
	/// Get all claimed cores for the given `para_id` at the specified depth.
	pub fn iter_claims_at_depth_for_para(
		&self,
		depth: usize,
		para_id: ParaId,
	) -> impl Iterator<Item = CoreIndex> + '_ {
		self.0.iter().filter_map(move |(core_index, queue)| match queue.get(depth) {
			Some(id) if *id == para_id => Some(*core_index),
			_ => None,
		})
	}
}
/// Fetch the claim queue and wrap it into a helpful `ClaimQueueSnapshot`
pub async fn fetch_claim_queue(
sender: &mut impl SubsystemSender<RuntimeApiMessage>,
relay_parent: Hash,
) -> Result<ClaimQueueSnapshot> {
let cq = request_claim_queue(relay_parent, sender)
.await
.await
.map_err(Error::RuntimeRequestCanceled)??;
Ok(cq.into())
}
/// Returns the lookahead from the scheduler params if the runtime supports it,
/// or default value if scheduling lookahead API is not supported by runtime.
pub async fn fetch_scheduling_lookahead(
parent: Hash,
session_index: SessionIndex,
sender: &mut impl overseer::SubsystemSender<RuntimeApiMessage>,
) -> Result<u32> {
let res = recv_runtime(
request_from_runtime(parent, sender, |tx| {
RuntimeApiRequest::SchedulingLookahead(session_index, tx)
})
.await,
)
.await;
if let Err(Error::RuntimeRequest(RuntimeApiError::NotSupported { .. })) = res {
gum::trace!(
target: LOG_TARGET,
?parent,
"Querying the scheduling lookahead from the runtime is not supported by the current Runtime API, falling back to default value of {}",
DEFAULT_SCHEDULING_LOOKAHEAD
);
Ok(DEFAULT_SCHEDULING_LOOKAHEAD)
} else {
res
}
}
/// Fetch the validation code bomb limit from the runtime.
pub async fn fetch_validation_code_bomb_limit(
parent: Hash,
session_index: SessionIndex,
sender: &mut impl overseer::SubsystemSender<RuntimeApiMessage>,
) -> Result<u32> {
let res = recv_runtime(
request_from_runtime(parent, sender, |tx| {
RuntimeApiRequest::ValidationCodeBombLimit(session_index, tx)
})
.await,
)
.await;
if let Err(Error::RuntimeRequest(RuntimeApiError::NotSupported { .. })) = res {
gum::trace!(
target: LOG_TARGET,
?parent,
"Querying the validation code bomb limit from the runtime is not supported by the current Runtime API",
);
// TODO: Remove this once runtime API version 12 is released.
#[allow(deprecated)]
Ok(pezkuwi_node_primitives::VALIDATION_CODE_BOMB_LIMIT as u32)
} else {
res
}
}
#[cfg(test)]
mod test {
	use super::*;
	// NOTE: `ClaimQueueSnapshot` wraps a `BTreeMap`, so iteration yields cores in
	// ascending `CoreIndex` order; the ordered `assert_eq!`s below rely on that.
	#[test]
	fn iter_claims_at_depth_for_para_works() {
		let claim_queue = ClaimQueueSnapshot(BTreeMap::from_iter(
			[
				(
					CoreIndex(0),
					VecDeque::from_iter([ParaId::from(1), ParaId::from(2), ParaId::from(1)]),
				),
				(
					CoreIndex(1),
					VecDeque::from_iter([ParaId::from(1), ParaId::from(1), ParaId::from(2)]),
				),
				(
					CoreIndex(2),
					VecDeque::from_iter([ParaId::from(1), ParaId::from(2), ParaId::from(3)]),
				),
				(
					CoreIndex(3),
					VecDeque::from_iter([ParaId::from(2), ParaId::from(1), ParaId::from(3)]),
				),
			]
			.into_iter(),
		));
		// Test getting claims for para_id 1 at depth 0: cores 0, 1, 2
		let depth_0_cores =
			claim_queue.iter_claims_at_depth_for_para(0, 1u32.into()).collect::<Vec<_>>();
		assert_eq!(depth_0_cores.len(), 3);
		assert_eq!(depth_0_cores, vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)]);
		// Test getting claims for para_id 1 at depth 1: cores 1, 3
		let depth_1_cores =
			claim_queue.iter_claims_at_depth_for_para(1, 1u32.into()).collect::<Vec<_>>();
		assert_eq!(depth_1_cores.len(), 2);
		assert_eq!(depth_1_cores, vec![CoreIndex(1), CoreIndex(3)]);
		// Test getting claims for para_id 1 at depth 2: core 0
		let depth_2_cores =
			claim_queue.iter_claims_at_depth_for_para(2, 1u32.into()).collect::<Vec<_>>();
		assert_eq!(depth_2_cores.len(), 1);
		assert_eq!(depth_2_cores, vec![CoreIndex(0)]);
		// Test getting claims for para_id 1 at depth 3 (beyond queue length): no claims
		let depth_3_cores =
			claim_queue.iter_claims_at_depth_for_para(3, 1u32.into()).collect::<Vec<_>>();
		assert!(depth_3_cores.is_empty());
		// Test getting claims for para_id 2 at depth 0: core 3
		let depth_0_cores =
			claim_queue.iter_claims_at_depth_for_para(0, 2u32.into()).collect::<Vec<_>>();
		assert_eq!(depth_0_cores.len(), 1);
		assert_eq!(depth_0_cores, vec![CoreIndex(3)]);
		// Test getting claims for para_id 2 at depth 1: cores 0, 2
		let depth_1_cores =
			claim_queue.iter_claims_at_depth_for_para(1, 2u32.into()).collect::<Vec<_>>();
		assert_eq!(depth_1_cores.len(), 2);
		assert_eq!(depth_1_cores, vec![CoreIndex(0), CoreIndex(2)]);
		// Test getting claims for para_id 2 at depth 2: core 1
		let depth_2_cores =
			claim_queue.iter_claims_at_depth_for_para(2, 2u32.into()).collect::<Vec<_>>();
		assert_eq!(depth_2_cores.len(), 1);
		assert_eq!(depth_2_cores, vec![CoreIndex(1)]);
		// Test getting claims for para_id 3 at depth 0: no claims
		let depth_0_cores =
			claim_queue.iter_claims_at_depth_for_para(0, 3u32.into()).collect::<Vec<_>>();
		assert!(depth_0_cores.is_empty());
		// Test getting claims for para_id 3 at depth 1: no claims
		let depth_1_cores =
			claim_queue.iter_claims_at_depth_for_para(1, 3u32.into()).collect::<Vec<_>>();
		assert!(depth_1_cores.is_empty());
		// Test getting claims for para_id 3 at depth 2: cores 2, 3
		let depth_2_cores =
			claim_queue.iter_claims_at_depth_for_para(2, 3u32.into()).collect::<Vec<_>>();
		assert_eq!(depth_2_cores.len(), 2);
		assert_eq!(depth_2_cores, vec![CoreIndex(2), CoreIndex(3)]);
		// Test getting claims for non-existent para_id at depth 0: no claims
		let depth_0_cores =
			claim_queue.iter_claims_at_depth_for_para(0, 99u32.into()).collect::<Vec<_>>();
		assert!(depth_0_cores.is_empty());
	}
}
// ----- extraction artifact: boundary before subsystem-util tests file -----
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
#![cfg(test)]
use super::*;
use executor::block_on;
use futures::{channel::mpsc, executor, FutureExt, SinkExt, StreamExt};
use pezkuwi_primitives_test_helpers::AlwaysZeroRng;
use std::{
sync::{
atomic::{AtomicUsize, Ordering},
Arc,
},
time::Duration,
};
#[test]
fn tick_tack_metronome() {
	let n = Arc::new(AtomicUsize::default());
	let (tick, mut block) = mpsc::unbounded();
	// Count every metronome tick and forward a unit value through the channel so the
	// checking future below can synchronize with the stream.
	let metronome = {
		let n = n.clone();
		let stream = Metronome::new(Duration::from_millis(137_u64));
		stream
			.for_each(move |_res| {
				let _ = n.fetch_add(1, Ordering::Relaxed);
				let mut tick = tick.clone();
				async move {
					tick.send(()).await.expect("Test helper channel works. qed");
				}
			})
			.fuse()
	};
	// Await four ticks, asserting the counter advanced by exactly one each time.
	let f2 = async move {
		block.next().await;
		assert_eq!(n.load(Ordering::Relaxed), 1_usize);
		block.next().await;
		assert_eq!(n.load(Ordering::Relaxed), 2_usize);
		block.next().await;
		assert_eq!(n.load(Ordering::Relaxed), 3_usize);
		block.next().await;
		assert_eq!(n.load(Ordering::Relaxed), 4_usize);
	}
	.fuse();
	futures::pin_mut!(f2);
	futures::pin_mut!(metronome);
	block_on(async move {
		// The metronome stream never terminates, so the checking future finishing is
		// the only way out of the select.
		futures::select!(
			_ = metronome => unreachable!("Metronome never stops. qed"),
			_ = f2 => (),
		)
	});
}
#[test]
fn subset_generation_check() {
	let mut values = (0_u8..=25).collect::<Vec<_>>();
	// 0..=25 contains 13 even numbers (0, 2, ..., 24) — more than the requested minimum
	// of 12. The assertions below imply every even value survives; presumably
	// `choose_random_subset` keeps all predicate-matching elements when they already
	// meet the minimum — TODO confirm against its contract.
	choose_random_subset::<u8, _>(|v| v & 0x01 == 0, &mut values, 12);
	values.sort();
	// After sorting, the survivors must be exactly 0, 2, 4, ...
	for (idx, v) in dbg!(values).into_iter().enumerate() {
		assert_eq!(v as usize, idx * 2);
	}
}
#[test]
fn subset_predefined_generation_check() {
	let mut values = (0_u8..=25).collect::<Vec<_>>();
	// With an always-false predicate nothing is "preferred", so the selection is driven
	// entirely by the (deterministic) all-zero RNG.
	choose_random_subset_with_rng::<u8, _, _>(|_| false, &mut values, &mut AlwaysZeroRng, 12);
	// Exactly the requested minimum of 12 elements must remain.
	assert_eq!(values.len(), 12);
	for (idx, v) in dbg!(values).into_iter().enumerate() {
		// Since shuffle actually shuffles the indexes from 1..len, then
		// our PRG that returns zeroes will shuffle 0 and 1, 1 and 2, ... len-2 and len-1
		assert_eq!(v as usize, idx + 1);
	}
}
@@ -0,0 +1,20 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Contains helpers for staging runtime calls.
//!
//! This module is intended to contain common boiler plate code handling unreleased runtime API
//! calls.