feat: initialize Kurdistan SDK - independent fork of Polkadot SDK

This commit is contained in:
2025-12-13 15:44:15 +03:00
commit e4778b4576
6838 changed files with 1847450 additions and 0 deletions
@@ -0,0 +1,113 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//
//! Error handling related code and Error/Result definitions.
use pezkuwi_node_network_protocol::PeerId;
use pezkuwi_node_subsystem::{RuntimeApiError, SubsystemError};
use pezkuwi_node_subsystem_util::{
backing_implicit_view::FetchError as ImplicitViewFetchError, runtime,
};
use pezkuwi_primitives::{CandidateHash, Hash, Id as ParaId};
use futures::channel::oneshot;
/// General result: the error side may be fatal or non-fatal ("just fyi").
pub type Result<T> = std::result::Result<T, Error>;

/// Result for non-fatal only failures.
pub type JfyiErrorResult<T> = std::result::Result<T, JfyiError>;

/// Result for fatal only failures.
pub type FatalResult<T> = std::result::Result<T, FatalError>;
// The `fatality(splitable)` macro derives `FatalError` and `JfyiError` from this
// single enum, splitting variants on the `#[fatal]` attribute. `#[fatal(forward)]`
// delegates the fatal/non-fatal decision to the wrapped `runtime::Error`.
#[allow(missing_docs)]
#[fatality::fatality(splitable)]
pub enum Error {
	#[fatal]
	#[error("Requester receiver stream finished")]
	RequesterReceiverFinished,

	#[fatal]
	#[error("Responder receiver stream finished")]
	ResponderReceiverFinished,

	#[fatal]
	#[error("Spawning subsystem task failed")]
	SpawnTask(#[source] SubsystemError),

	#[fatal]
	#[error("Receiving message from overseer failed")]
	SubsystemReceive(#[source] SubsystemError),

	#[fatal(forward)]
	#[error("Error while accessing runtime information")]
	Runtime(#[from] runtime::Error),

	#[error("RuntimeAPISubsystem channel closed before receipt")]
	RuntimeApiUnavailable(#[source] oneshot::Canceled),

	#[error("Fetching persisted validation data for para {0:?}, {1:?}")]
	FetchPersistedValidationData(ParaId, RuntimeApiError),

	#[error("Fetching session index failed {0:?}")]
	FetchSessionIndex(RuntimeApiError),

	#[error("Fetching session info failed {0:?}")]
	FetchSessionInfo(RuntimeApiError),

	#[error("Fetching disabled validators failed {0:?}")]
	FetchDisabledValidators(RuntimeApiError),

	#[error("Fetching validator groups failed {0:?}")]
	FetchValidatorGroups(RuntimeApiError),

	#[error("Fetching claim queue failed {0:?}")]
	FetchClaimQueue(RuntimeApiError),

	#[error("Fetching minimum backing votes failed {0:?}")]
	FetchMinimumBackingVotes(RuntimeApiError),

	#[error("Fetching node features failed {0:?}")]
	FetchNodeFeatures(RuntimeApiError),

	#[error("Attempted to share statement when not a validator or not assigned")]
	InvalidShare,

	#[error("Relay parent could not be found in active heads")]
	NoSuchHead(Hash),

	#[error("Message from not connected peer")]
	NoSuchPeer(PeerId),

	#[error("Peer requested data for candidate it never received a notification for (malicious?)")]
	RequestedUnannouncedCandidate(PeerId, CandidateHash),

	// A large statement status was requested, which could not be found.
	#[error("Statement status does not exist")]
	NoSuchLargeStatementStatus(Hash, CandidateHash),

	// A fetched large statement was requested, but could not be found.
	#[error("Fetched large statement does not exist")]
	NoSuchFetchedLargeStatement(Hash, CandidateHash),

	// Responder no longer waits for our data. (Should not happen right now.)
	#[error("Oneshot `GetData` channel closed")]
	ResponderGetDataCanceled,

	// Failed to activate leaf due to a fetch error.
	#[error("Implicit view failure while activating leaf")]
	ActivateLeafFailure(ImplicitViewFetchError),
}
@@ -0,0 +1,276 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! The Statement Distribution Subsystem.
//!
//! This is responsible for distributing signed statements about candidate
//! validity among validators.
#![warn(missing_docs)]
use error::FatalResult;
use std::time::Duration;
use pezkuwi_node_network_protocol::request_response::{
v2::AttestedCandidateRequest, IncomingRequestReceiver,
};
use pezkuwi_node_subsystem::{
messages::StatementDistributionMessage, overseer, ActiveLeavesUpdate, FromOrchestra,
OverseerSignal, SpawnedSubsystem, SubsystemError,
};
use pezkuwi_node_subsystem_util::reputation::{ReputationAggregator, REPUTATION_CHANGE_INTERVAL};
use futures::{channel::mpsc, prelude::*};
use sp_keystore::KeystorePtr;
use fatality::Nested;
mod error;
pub use error::{Error, FatalError, JfyiError, Result};
/// Metrics for the statement distribution
pub(crate) mod metrics;
use metrics::Metrics;
mod v2;
/// Log target used by all `gum` tracing output of this subsystem.
const LOG_TARGET: &str = "teyrchain::statement-distribution";
/// The statement distribution subsystem.
pub struct StatementDistributionSubsystem {
	/// Pointer to a keystore, which is required for determining this node's validator index.
	keystore: KeystorePtr,
	/// Receiver for incoming candidate requests.
	///
	/// Wrapped in `Option` so `run_inner` can `take()` it and hand ownership to the
	/// spawned candidate-responder task.
	req_receiver: Option<IncomingRequestReceiver<AttestedCandidateRequest>>,
	/// Prometheus metrics
	metrics: Metrics,
	/// Aggregated reputation change
	reputation: ReputationAggregator,
}
#[overseer::subsystem(StatementDistribution, error=SubsystemError, prefix=self::overseer)]
impl<Context> StatementDistributionSubsystem {
	/// Start the subsystem's main task, wrapping any fatal error from `run` into a
	/// `SubsystemError` attributed to "statement-distribution".
	fn start(self, ctx: Context) -> SpawnedSubsystem {
		// Swallow error because failure is fatal to the node and we log with more precision
		// within `run`.
		SpawnedSubsystem {
			name: "statement-distribution-subsystem",
			future: self
				.run(ctx)
				.map_err(|e| SubsystemError::with_origin("statement-distribution", e))
				.boxed(),
		}
	}
}
/// Messages to be handled in this subsystem.
///
/// Multiplexes the subsystem's various event sources into a single type so the
/// main loop can `select!` over them in one place.
enum MuxedMessage {
	/// Messages from other subsystems.
	Subsystem(FatalResult<FromOrchestra<StatementDistributionMessage>>),
	/// Messages from candidate responder background task.
	Responder(Option<v2::ResponderMessage>),
	/// Messages from answered requests.
	Response(v2::UnhandledResponse),
	/// Message that a request is ready to be retried. This just acts as a signal that we should
	/// dispatch all pending requests again.
	RetryRequest(()),
}
#[overseer::contextbounds(StatementDistribution, prefix = self::overseer)]
impl MuxedMessage {
	/// Await the next event from any of the subsystem's sources and wrap it in the
	/// corresponding `MuxedMessage` variant.
	///
	/// Borrows the request/response managers out of `state` so the pending-request
	/// futures can be polled alongside the overseer and responder channels.
	async fn receive<Context>(
		ctx: &mut Context,
		state: &mut v2::State,
		from_responder: &mut mpsc::Receiver<v2::ResponderMessage>,
	) -> MuxedMessage {
		let (request_manager, response_manager) = state.request_and_response_managers();
		// We are only fusing here to make `select` happy, in reality we will quit if one of those
		// streams end:
		let from_orchestra = ctx.recv().fuse();
		let from_responder = from_responder.next();
		let receive_response = v2::receive_response(response_manager).fuse();
		let retry_request = v2::next_retry(request_manager).fuse();
		futures::pin_mut!(from_orchestra, from_responder, receive_response, retry_request,);
		futures::select! {
			msg = from_orchestra => MuxedMessage::Subsystem(msg.map_err(FatalError::SubsystemReceive)),
			msg = from_responder => MuxedMessage::Responder(msg),
			msg = receive_response => MuxedMessage::Response(msg),
			msg = retry_request => MuxedMessage::RetryRequest(msg),
		}
	}
}
#[overseer::contextbounds(StatementDistribution, prefix = self::overseer)]
impl StatementDistributionSubsystem {
	/// Create a new Statement Distribution Subsystem
	pub fn new(
		keystore: KeystorePtr,
		req_receiver: IncomingRequestReceiver<AttestedCandidateRequest>,
		metrics: Metrics,
	) -> Self {
		Self { keystore, req_receiver: Some(req_receiver), metrics, reputation: Default::default() }
	}

	/// Run the subsystem's main loop with the default reputation-flush interval.
	async fn run<Context>(self, ctx: Context) -> std::result::Result<(), FatalError> {
		self.run_inner(ctx, REPUTATION_CHANGE_INTERVAL).await
	}

	/// Main event loop: spawns the candidate-responder task, then multiplexes
	/// overseer signals, responder messages, request responses, and retry signals
	/// until `Conclude` is received or a fatal error occurs.
	///
	/// `reputation_interval` controls how often aggregated reputation changes are
	/// flushed to the network bridge (parameterized for testing).
	async fn run_inner<Context>(
		mut self,
		mut ctx: Context,
		reputation_interval: Duration,
	) -> std::result::Result<(), FatalError> {
		let new_reputation_delay = || futures_timer::Delay::new(reputation_interval).fuse();
		let mut reputation_delay = new_reputation_delay();
		let mut state = crate::v2::State::new(self.keystore.clone());
		// Sender/receiver for getting news from our candidate responder task.
		let (res_sender, mut res_receiver) = mpsc::channel(1);
		ctx.spawn(
			"candidate-responder",
			v2::respond_task(
				self.req_receiver.take().expect("Mandatory argument to new. qed"),
				res_sender.clone(),
				self.metrics.clone(),
			)
			.boxed(),
		)
		.map_err(FatalError::SpawnTask)?;
		loop {
			// Wait for the next message.
			let message = futures::select! {
				_ = reputation_delay => {
					// Periodic flush of the aggregated reputation changes.
					self.reputation.send(ctx.sender()).await;
					reputation_delay = new_reputation_delay();
					continue
				},
				message = MuxedMessage::receive(
					&mut ctx,
					&mut state,
					&mut res_receiver,
				).fuse() => {
					message
				}
			};
			match message {
				MuxedMessage::Subsystem(result) => {
					let result = self.handle_subsystem_message(&mut ctx, &mut state, result?).await;
					// Fatal errors propagate via `?`; non-fatal ("jfyi") errors are only logged.
					match result.into_nested()? {
						Ok(true) => break,
						Ok(false) => {},
						Err(jfyi) => gum::debug!(target: LOG_TARGET, error = ?jfyi),
					}
				},
				MuxedMessage::Responder(result) => {
					// NOTE(review): a closed responder channel is mapped to
					// `RequesterReceiverFinished`; `ResponderReceiverFinished` looks like
					// the intended variant — confirm against the error definitions.
					v2::answer_request(
						&mut state,
						result.ok_or(FatalError::RequesterReceiverFinished)?,
					);
				},
				MuxedMessage::Response(result) => {
					v2::handle_response(
						&mut ctx,
						&mut state,
						result,
						&mut self.reputation,
						&self.metrics,
					)
					.await;
				},
				MuxedMessage::RetryRequest(()) => {
					// A pending request is ready to retry. This is only a signal to call
					// `dispatch_requests` again.
					()
				},
			};
			// Dispatch pending candidate requests after every handled event.
			v2::dispatch_requests(&mut ctx, &mut state).await;
		}
		Ok(())
	}

	/// Handle a single message from the overseer.
	///
	/// Returns `Ok(true)` when the subsystem should conclude, `Ok(false)` to keep
	/// running, or an error (fatal or jfyi) for the caller to sort out.
	async fn handle_subsystem_message<Context>(
		&mut self,
		ctx: &mut Context,
		state: &mut v2::State,
		message: FromOrchestra<StatementDistributionMessage>,
	) -> Result<bool> {
		let metrics = &self.metrics;
		match message {
			FromOrchestra::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate {
				activated,
				deactivated,
			})) => {
				let _timer = metrics.time_active_leaves_update();
				if let Some(ref activated) = activated {
					let res =
						v2::handle_active_leaves_update(ctx, state, activated, &metrics).await;
					// Regardless of the result of leaf activation, we always prune before
					// handling it to avoid leaks.
					v2::handle_deactivate_leaves(state, &deactivated);
					res?;
				} else {
					v2::handle_deactivate_leaves(state, &deactivated);
				}
			},
			FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {
				// do nothing
			},
			FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(true),
			FromOrchestra::Communication { msg } => match msg {
				StatementDistributionMessage::Share(relay_parent, statement) => {
					let _timer = metrics.time_share();
					v2::share_local_statement(
						ctx,
						state,
						relay_parent,
						statement,
						&mut self.reputation,
						&self.metrics,
					)
					.await?;
				},
				StatementDistributionMessage::NetworkBridgeUpdate(event) => {
					v2::handle_network_update(
						ctx,
						state,
						event,
						&mut self.reputation,
						&self.metrics,
					)
					.await;
				},
				StatementDistributionMessage::Backed(candidate_hash) => {
					crate::v2::handle_backed_candidate_message(
						ctx,
						state,
						candidate_hash,
						&self.metrics,
					)
					.await;
				},
			},
		}
		Ok(false)
	}
}
@@ -0,0 +1,242 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Metrics for the statement distribution module
use pezkuwi_node_subsystem_util::metrics::{self, prometheus};
/// Buckets more suitable for checking the typical latency values
/// (in seconds, Prometheus histogram convention).
const HISTOGRAM_LATENCY_BUCKETS: &[f64] = &[
	0.000025, 0.00005, 0.000075, 0.0001, 0.0003125, 0.000625, 0.00125, 0.0025, 0.005, 0.01, 0.025,
	0.05, 0.1,
];
/// The actual metric handles. Only constructed when a Prometheus registry is
/// supplied via `Metrics::try_register`.
#[derive(Clone)]
struct MetricsInner {
	// V1
	sent_requests: prometheus::Counter<prometheus::U64>,
	received_responses: prometheus::CounterVec<prometheus::U64>,
	network_bridge_update: prometheus::HistogramVec,
	statements_unexpected: prometheus::CounterVec<prometheus::U64>,
	created_message_size: prometheus::Gauge<prometheus::U64>,
	// V1+
	statements_distributed: prometheus::Counter<prometheus::U64>,
	active_leaves_update: prometheus::Histogram,
	share: prometheus::Histogram,
	// V2+
	peer_rate_limit_request_drop: prometheus::Counter<prometheus::U64>,
	max_parallel_requests_reached: prometheus::Counter<prometheus::U64>,
}

/// Statement Distribution metrics.
///
/// `None` when no registry was provided; every recording method is then a no-op.
#[derive(Default, Clone)]
pub struct Metrics(Option<MetricsInner>);
impl Metrics {
	/// Update statements distributed counter
	pub fn on_statement_distributed(&self) {
		if let Some(inner) = self.0.as_ref() {
			inner.statements_distributed.inc();
		}
	}

	/// Update statements distributed counter by an amount
	pub fn on_statements_distributed(&self, n: usize) {
		if let Some(inner) = self.0.as_ref() {
			inner.statements_distributed.inc_by(n as u64);
		}
	}

	/// Update sent requests counter
	/// This counter is updated merely for the statements sent via request/response method,
	/// meaning that it counts large statements only
	pub fn on_sent_request(&self) {
		if let Some(inner) = self.0.as_ref() {
			inner.sent_requests.inc();
		}
	}

	/// Update counters for the received responses with `succeeded` or `failed` labels
	/// These counters are updated merely for the statements received via request/response method,
	/// meaning that they count large statements only
	pub fn on_received_response(&self, success: bool) {
		if let Some(inner) = self.0.as_ref() {
			let label = match success {
				true => "succeeded",
				false => "failed",
			};
			inner.received_responses.with_label_values(&[label]).inc();
		}
	}

	/// Provide a timer for `active_leaves_update` which observes on drop.
	pub fn time_active_leaves_update(
		&self,
	) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
		let inner = self.0.as_ref()?;
		Some(inner.active_leaves_update.start_timer())
	}

	/// Provide a timer for `share` which observes on drop.
	pub fn time_share(&self) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
		let inner = self.0.as_ref()?;
		Some(inner.share.start_timer())
	}

	/// Provide a timer for `network_bridge_update` which observes on drop.
	pub fn time_network_bridge_update(
		&self,
		message_type: &'static str,
	) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
		let inner = self.0.as_ref()?;
		let timer = inner.network_bridge_update.with_label_values(&[message_type]).start_timer();
		Some(timer)
	}

	/// Update the out-of-view statements counter for unexpected valid statements
	pub fn on_unexpected_statement_valid(&self) {
		if let Some(inner) = self.0.as_ref() {
			inner.statements_unexpected.with_label_values(&["valid"]).inc();
		}
	}

	/// Update the out-of-view statements counter for unexpected seconded statements
	pub fn on_unexpected_statement_seconded(&self) {
		if let Some(inner) = self.0.as_ref() {
			inner.statements_unexpected.with_label_values(&["seconded"]).inc();
		}
	}

	/// Update the out-of-view statements counter for unexpected large statements
	pub fn on_unexpected_statement_large(&self) {
		if let Some(inner) = self.0.as_ref() {
			inner.statements_unexpected.with_label_values(&["large"]).inc();
		}
	}

	/// Report size of a created message.
	pub fn on_created_message(&self, size: usize) {
		if let Some(inner) = self.0.as_ref() {
			inner.created_message_size.set(size as u64);
		}
	}

	/// Update sent dropped requests counter when request dropped because
	/// of peer rate limit
	pub fn on_request_dropped_peer_rate_limit(&self) {
		if let Some(inner) = self.0.as_ref() {
			inner.peer_rate_limit_request_drop.inc();
		}
	}

	/// Update max parallel requests reached counter
	/// This counter is updated when the maximum number of parallel requests is reached
	/// and we are waiting for one of the requests to finish
	pub fn on_max_parallel_requests_reached(&self) {
		if let Some(inner) = self.0.as_ref() {
			inner.max_parallel_requests_reached.inc();
		}
	}
}
impl metrics::Metrics for Metrics {
	/// Register all statement-distribution metrics with the given Prometheus
	/// registry. Returns a `Metrics` wrapping the live handles, or the first
	/// registration error encountered.
	fn try_register(
		registry: &prometheus::Registry,
	) -> std::result::Result<Self, prometheus::PrometheusError> {
		let metrics = MetricsInner {
			statements_distributed: prometheus::register(
				prometheus::Counter::new(
					"pezkuwi_teyrchain_statements_distributed_total",
					"Number of candidate validity statements distributed to other peers.",
				)?,
				registry,
			)?,
			sent_requests: prometheus::register(
				prometheus::Counter::new(
					"pezkuwi_teyrchain_statement_distribution_sent_requests_total",
					"Number of large statement fetching requests sent.",
				)?,
				registry,
			)?,
			received_responses: prometheus::register(
				prometheus::CounterVec::new(
					prometheus::Opts::new(
						"pezkuwi_teyrchain_statement_distribution_received_responses_total",
						"Number of received responses for large statement data.",
					),
					&["success"],
				)?,
				registry,
			)?,
			active_leaves_update: prometheus::register(
				prometheus::Histogram::with_opts(
					prometheus::HistogramOpts::new(
						"pezkuwi_teyrchain_statement_distribution_active_leaves_update",
						"Time spent within `statement_distribution::active_leaves_update`",
					)
					// Use fine-grained latency buckets rather than Prometheus defaults.
					.buckets(HISTOGRAM_LATENCY_BUCKETS.into()),
				)?,
				registry,
			)?,
			share: prometheus::register(
				prometheus::Histogram::with_opts(
					prometheus::HistogramOpts::new(
						"pezkuwi_teyrchain_statement_distribution_share",
						"Time spent within `statement_distribution::share`",
					)
					.buckets(HISTOGRAM_LATENCY_BUCKETS.into()),
				)?,
				registry,
			)?,
			network_bridge_update: prometheus::register(
				prometheus::HistogramVec::new(
					prometheus::HistogramOpts::new(
						"pezkuwi_teyrchain_statement_distribution_network_bridge_update",
						"Time spent within `statement_distribution::network_bridge_update`",
					)
					.buckets(HISTOGRAM_LATENCY_BUCKETS.into()),
					&["message_type"],
				)?,
				registry,
			)?,
			statements_unexpected: prometheus::register(
				prometheus::CounterVec::new(
					prometheus::Opts::new(
						"pezkuwi_teyrchain_statement_distribution_statements_unexpected",
						"Number of statements that were not expected to be received.",
					),
					&["type"],
				)?,
				registry,
			)?,
			created_message_size: prometheus::register(
				prometheus::Gauge::with_opts(prometheus::Opts::new(
					"pezkuwi_teyrchain_statement_distribution_created_message_size",
					"Size of created messages containing Seconded statements.",
				))?,
				registry,
			)?,
			peer_rate_limit_request_drop: prometheus::register(
				prometheus::Counter::new(
					"pezkuwi_teyrchain_statement_distribution_peer_rate_limit_request_drop_total",
					"Number of statement distribution requests dropped because of the peer rate limiting.",
				)?,
				registry,
			)?,
			max_parallel_requests_reached: prometheus::register(
				prometheus::Counter::new(
					"pezkuwi_teyrchain_statement_distribution_max_parallel_requests_reached_total",
					"Number of times the maximum number of parallel requests was reached.",
				)?,
				registry,
			)?,
		};
		Ok(Metrics(Some(metrics)))
	}
}
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,74 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! A utility for tracking groups and their members within a session.
use pezkuwi_primitives::{effective_minimum_backing_votes, GroupIndex, IndexedVec, ValidatorIndex};
use std::collections::HashMap;
/// Validator groups within a session, plus some helpful indexing for
/// looking up groups by validator indices or authority discovery ID.
#[derive(Debug, Clone)]
pub struct Groups {
	// The groups as provided by the session, indexed by group index.
	groups: IndexedVec<GroupIndex, Vec<ValidatorIndex>>,
	// Reverse index: which group each validator belongs to.
	by_validator_index: HashMap<ValidatorIndex, GroupIndex>,
	// Raw backing threshold from the session; combined with group size via
	// `effective_minimum_backing_votes`.
	backing_threshold: u32,
}
impl Groups {
	/// Create a new [`Groups`] tracker with the groups and discovery keys
	/// from the session.
	pub fn new(
		groups: IndexedVec<GroupIndex, Vec<ValidatorIndex>>,
		backing_threshold: u32,
	) -> Self {
		// Build the reverse index once, up front: validator index -> group index.
		let mut by_validator_index = HashMap::new();
		for (i, group) in groups.iter().enumerate() {
			let index = GroupIndex(i as _);
			for v in group {
				by_validator_index.insert(*v, index);
			}
		}
		Groups { groups, by_validator_index, backing_threshold }
	}

	/// Access all the underlying groups.
	pub fn all(&self) -> &IndexedVec<GroupIndex, Vec<ValidatorIndex>> {
		&self.groups
	}

	/// Get the underlying group validators by group index.
	pub fn get(&self, group_index: GroupIndex) -> Option<&[ValidatorIndex]> {
		self.groups.get(group_index).map(|x| &x[..])
	}

	/// Get the backing group size and the effective minimum number of backing
	/// votes required for that group, or `None` if the group index is unknown.
	pub fn get_size_and_backing_threshold(
		&self,
		group_index: GroupIndex,
	) -> Option<(usize, usize)> {
		self.get(group_index)
			.map(|g| (g.len(), effective_minimum_backing_votes(g.len(), self.backing_threshold)))
	}

	/// Get the group index for a validator by index.
	pub fn by_validator_index(&self, validator_index: ValidatorIndex) -> Option<GroupIndex> {
		// `.copied()` is the idiomatic form of `.map(|x| *x)` for `Copy` values.
		self.by_validator_index.get(&validator_index).copied()
	}
}
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,364 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! A store of all statements under a given relay-parent.
//!
//! This structure doesn't attempt to do any spam protection, which must
//! be provided at a higher level.
//!
//! This keeps track of statements submitted with a number of different of
//! views into this data: views based on the candidate, views based on the validator
//! groups, and views based on the validators themselves.
use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec};
use pezkuwi_node_network_protocol::v3::StatementFilter;
use pezkuwi_primitives::{
CandidateHash, CompactStatement, GroupIndex, SignedStatement, ValidatorIndex,
};
use std::collections::hash_map::{Entry as HEntry, HashMap};
use super::groups::Groups;
/// Possible origins of a statement.
pub enum StatementOrigin {
	/// The statement originated locally.
	Local,
	/// The statement originated from a remote peer.
	Remote,
}
impl StatementOrigin {
	/// Whether the statement was produced by this node itself.
	fn is_local(&self) -> bool {
		matches!(self, StatementOrigin::Local)
	}
}
/// A signed statement together with a flag tracking whether the backing
/// subsystem already knows about it.
struct StoredStatement {
	statement: SignedStatement,
	// `true` when the statement originated locally or was explicitly marked
	// via `note_known_by_backing`.
	known_by_backing: bool,
}
/// Storage for statements. Intended to be used for statements signed under
/// the same relay-parent. See module docs for more details.
pub struct StatementStore {
	// Per-validator bookkeeping: group membership and seconded count.
	validator_meta: HashMap<ValidatorIndex, ValidatorMeta>,
	// we keep statements per-group because even though only one group _should_ be
	// producing statements about a candidate, until we have the candidate receipt
	// itself, we can't tell which group that is.
	group_statements: HashMap<(GroupIndex, CandidateHash), GroupStatements>,
	// All statements, deduplicated by (validator, compact statement) fingerprint.
	known_statements: HashMap<Fingerprint, StoredStatement>,
}
impl StatementStore {
/// Create a new [`StatementStore`]
pub fn new(groups: &Groups) -> Self {
let mut validator_meta = HashMap::new();
for (g, group) in groups.all().iter().enumerate() {
for (i, v) in group.iter().enumerate() {
validator_meta.insert(
*v,
ValidatorMeta {
seconded_count: 0,
within_group_index: i,
group: GroupIndex(g as _),
},
);
}
}
StatementStore {
validator_meta,
group_statements: HashMap::new(),
known_statements: HashMap::new(),
}
}
/// Insert a statement. Returns `true` if was not known already, `false` if it was.
/// Ignores statements by unknown validators and returns an error.
pub fn insert(
&mut self,
groups: &Groups,
statement: SignedStatement,
origin: StatementOrigin,
) -> Result<bool, Error> {
let validator_index = statement.validator_index();
let validator_meta = match self.validator_meta.get_mut(&validator_index) {
None => return Err(Error::ValidatorUnknown),
Some(m) => m,
};
let compact = statement.payload().clone();
let fingerprint = (validator_index, compact.clone());
match self.known_statements.entry(fingerprint) {
HEntry::Occupied(mut e) => {
if let StatementOrigin::Local = origin {
e.get_mut().known_by_backing = true;
}
return Ok(false);
},
HEntry::Vacant(e) => {
e.insert(StoredStatement { statement, known_by_backing: origin.is_local() });
},
}
let candidate_hash = *compact.candidate_hash();
let seconded = if let CompactStatement::Seconded(_) = compact { true } else { false };
// cross-reference updates.
{
let group_index = validator_meta.group;
let group = match groups.get(group_index) {
Some(g) => g,
None => {
gum::error!(
target: crate::LOG_TARGET,
?group_index,
"groups passed into `insert` differ from those used at store creation"
);
return Err(Error::ValidatorUnknown);
},
};
let group_statements = self
.group_statements
.entry((group_index, candidate_hash))
.or_insert_with(|| GroupStatements::with_group_size(group.len()));
if seconded {
validator_meta.seconded_count += 1;
group_statements.note_seconded(validator_meta.within_group_index);
} else {
group_statements.note_validated(validator_meta.within_group_index);
}
}
Ok(true)
}
/// Fill a `StatementFilter` to be used in the grid topology with all statements
/// we are already aware of.
pub fn fill_statement_filter(
&self,
group_index: GroupIndex,
candidate_hash: CandidateHash,
statement_filter: &mut StatementFilter,
) {
if let Some(statements) = self.group_statements.get(&(group_index, candidate_hash)) {
statement_filter.seconded_in_group |= statements.seconded.as_bitslice();
statement_filter.validated_in_group |= statements.valid.as_bitslice();
}
}
/// Get an iterator over stored signed statements by the group conforming to the
/// given filter.
///
/// Seconded statements are provided first.
pub fn group_statements<'a>(
&'a self,
groups: &'a Groups,
group_index: GroupIndex,
candidate_hash: CandidateHash,
filter: &'a StatementFilter,
) -> impl Iterator<Item = &'a SignedStatement> + 'a {
let group_validators = groups.get(group_index);
let seconded_statements = filter
.seconded_in_group
.iter_ones()
.filter_map(move |i| group_validators.as_ref().and_then(|g| g.get(i)))
.filter_map(move |v| {
self.known_statements.get(&(*v, CompactStatement::Seconded(candidate_hash)))
})
.map(|s| &s.statement);
let valid_statements = filter
.validated_in_group
.iter_ones()
.filter_map(move |i| group_validators.as_ref().and_then(|g| g.get(i)))
.filter_map(move |v| {
self.known_statements.get(&(*v, CompactStatement::Valid(candidate_hash)))
})
.map(|s| &s.statement);
seconded_statements.chain(valid_statements)
}
/// Get the full statement of this kind issued by this validator, if it is known.
pub fn validator_statement(
&self,
validator_index: ValidatorIndex,
statement: CompactStatement,
) -> Option<&SignedStatement> {
self.known_statements.get(&(validator_index, statement)).map(|s| &s.statement)
}
/// Get an iterator over all statements marked as being unknown by the backing subsystem.
/// This provides `Seconded` statements prior to `Valid` statements.
pub fn fresh_statements_for_backing<'a>(
&'a self,
validators: &'a [ValidatorIndex],
candidate_hash: CandidateHash,
) -> impl Iterator<Item = &'a SignedStatement> + 'a {
let s_st = CompactStatement::Seconded(candidate_hash);
let v_st = CompactStatement::Valid(candidate_hash);
let fresh_seconded =
validators.iter().map(move |v| self.known_statements.get(&(*v, s_st.clone())));
let fresh_valid =
validators.iter().map(move |v| self.known_statements.get(&(*v, v_st.clone())));
fresh_seconded
.chain(fresh_valid)
.flatten()
.filter(|stored| !stored.known_by_backing)
.map(|stored| &stored.statement)
}
/// Get the amount of known `Seconded` statements by the given validator index.
pub fn seconded_count(&self, validator_index: &ValidatorIndex) -> usize {
self.validator_meta.get(validator_index).map_or(0, |m| m.seconded_count)
}
/// Note that a statement is known by the backing subsystem.
pub fn note_known_by_backing(
&mut self,
validator_index: ValidatorIndex,
statement: CompactStatement,
) {
if let Some(stored) = self.known_statements.get_mut(&(validator_index, statement)) {
stored.known_by_backing = true;
}
}
}
/// Error when inserting a statement into the statement store.
#[derive(Debug)]
pub enum Error {
	/// The validator was unknown.
	ValidatorUnknown,
}

/// Deduplication key for statements: one entry per validator per compact statement.
type Fingerprint = (ValidatorIndex, CompactStatement);

/// Per-validator bookkeeping maintained by the store.
struct ValidatorMeta {
	// The group this validator belongs to.
	group: GroupIndex,
	// The validator's position within its group's validator list.
	within_group_index: usize,
	// How many `Seconded` statements this validator has issued.
	seconded_count: usize,
}

/// Per-(group, candidate) bitfields of which group members seconded/validated.
struct GroupStatements {
	seconded: BitVec<u8, BitOrderLsb0>,
	valid: BitVec<u8, BitOrderLsb0>,
}
impl GroupStatements {
	/// Create empty `seconded`/`valid` bitfields sized to the group.
	fn with_group_size(group_size: usize) -> Self {
		let seconded = BitVec::repeat(false, group_size);
		let valid = BitVec::repeat(false, group_size);
		GroupStatements { seconded, valid }
	}

	/// Mark the validator at position `idx` within the group as having seconded.
	fn note_seconded(&mut self, idx: usize) {
		self.seconded.set(idx, true);
	}

	/// Mark the validator at position `idx` within the group as having validated.
	fn note_validated(&mut self, idx: usize) {
		self.valid.set(idx, true);
	}
}
#[cfg(test)]
mod tests {
	use super::*;
	use pezkuwi_primitives::{Hash, SigningContext, ValidatorPair};
	use sp_application_crypto::Pair as PairT;

	#[test]
	fn always_provides_fresh_statements_in_order() {
		let validator_a = ValidatorIndex(1);
		let validator_b = ValidatorIndex(2);
		let candidate_hash = CandidateHash(Hash::repeat_byte(42));
		let valid_statement = CompactStatement::Valid(candidate_hash);
		let seconded_statement = CompactStatement::Seconded(candidate_hash);
		let signing_context =
			SigningContext { parent_hash: Hash::repeat_byte(0), session_index: 1 };
		// One group containing both validators, backing threshold 2.
		let groups = Groups::new(vec![vec![validator_a, validator_b]].into(), 2);
		let mut store = StatementStore::new(&groups);
		// import a Valid statement from A and a Seconded statement from B.
		let signed_valid_by_a = {
			let payload = valid_statement.signing_payload(&signing_context);
			let pair = ValidatorPair::generate().0;
			let signature = pair.sign(&payload[..]);
			SignedStatement::new(
				valid_statement.clone(),
				validator_a,
				signature,
				&signing_context,
				&pair.public(),
			)
			.unwrap()
		};
		store.insert(&groups, signed_valid_by_a, StatementOrigin::Remote).unwrap();
		let signed_seconded_by_b = {
			let payload = seconded_statement.signing_payload(&signing_context);
			let pair = ValidatorPair::generate().0;
			let signature = pair.sign(&payload[..]);
			SignedStatement::new(
				seconded_statement.clone(),
				validator_b,
				signature,
				&signing_context,
				&pair.public(),
			)
			.unwrap()
		};
		store.insert(&groups, signed_seconded_by_b, StatementOrigin::Remote).unwrap();
		// Regardless of the order statements are requested,
		// we will get them in the order [B, A] because seconded statements must be first.
		let vals = &[validator_a, validator_b];
		let statements =
			store.fresh_statements_for_backing(vals, candidate_hash).collect::<Vec<_>>();
		assert_eq!(statements.len(), 2);
		assert_eq!(statements[0].payload(), &seconded_statement);
		assert_eq!(statements[1].payload(), &valid_statement);
		let vals = &[validator_b, validator_a];
		let statements =
			store.fresh_statements_for_backing(vals, candidate_hash).collect::<Vec<_>>();
		assert_eq!(statements.len(), 2);
		assert_eq!(statements[0].payload(), &seconded_statement);
		assert_eq!(statements[1].payload(), &valid_statement);
	}
}
File diff suppressed because it is too large Load Diff
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,920 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
#![allow(clippy::clone_on_copy)]
use super::*;
use crate::*;
use pezkuwi_node_network_protocol::{
grid_topology::TopologyPeerInfo,
request_response::{outgoing::Recipient, ReqProtocolNames},
v3::{BackedCandidateAcknowledgement, BackedCandidateManifest},
view, ObservedRole,
};
use pezkuwi_node_primitives::{Statement, StatementWithPVD};
use pezkuwi_node_subsystem::messages::{
network_bridge_event::NewGossipTopology, AllMessages, ChainApiMessage, HypotheticalCandidate,
HypotheticalMembership, NetworkBridgeEvent, ProspectiveTeyrchainsMessage, ReportPeerMessage,
RuntimeApiMessage, RuntimeApiRequest,
};
use pezkuwi_node_subsystem_test_helpers as test_helpers;
use pezkuwi_node_subsystem_util::TimeoutExt;
use pezkuwi_primitives::{
AssignmentPair, Block, BlockNumber, CommittedCandidateReceiptV2 as CommittedCandidateReceipt,
GroupRotationInfo, HeadData, Header, IndexedVec, NodeFeatures, PersistedValidationData,
SessionIndex, SessionInfo, ValidatorPair, DEFAULT_SCHEDULING_LOOKAHEAD,
};
use sc_keystore::LocalKeystore;
use sc_network::ProtocolName;
use sp_application_crypto::Pair as PairT;
use sp_authority_discovery::AuthorityPair as AuthorityDiscoveryPair;
use sp_keyring::Sr25519Keyring;
use assert_matches::assert_matches;
use codec::Encode;
use futures::Future;
use pezkuwi_primitives_test_helpers::rand::{Rng, SeedableRng};
use test_helpers::mock::new_leaf;
use std::sync::Arc;
mod cluster;
mod grid;
mod requests;
/// Handle used by tests to drive the subsystem under test and observe its
/// outgoing messages.
type VirtualOverseer =
	pezkuwi_node_subsystem_test_helpers::TestSubsystemContextHandle<StatementDistributionMessage>;

// Some deterministic genesis hash for req/res protocol names
const GENESIS_HASH: Hash = Hash::repeat_byte(0xff);
/// The role the local node plays in a test scenario.
#[derive(Debug, Copy, Clone)]
enum LocalRole {
	/// Active validator.
	Validator,
	/// Authority, not in active validator set.
	InactiveValidator,
	/// Not a validator.
	None,
}
/// Configuration from which a [`TestState`] is generated.
struct TestConfig {
	/// Number of active validators.
	validator_count: usize,
	/// How many validators to place in each group.
	group_size: usize,
	/// Whether the local node should be a validator, and in what capacity.
	local_validator: LocalRole,
	/// Whether to allow v2 candidate descriptors (node feature bit).
	allow_v2_descriptors: bool,
}
/// Info about the local node when it acts as a (possibly inactive) validator.
#[derive(Debug, Clone)]
struct TestLocalValidator {
	/// The local validator's index; for an inactive validator this is one past
	/// the end of the active set (see `TestState::from_config`).
	validator_index: ValidatorIndex,
	/// The group the local validator belongs to; `None` when it is not part of
	/// the active validator set.
	group_index: Option<GroupIndex>,
}
/// Shared state for a single subsystem test run.
struct TestState {
	/// The configuration this state was generated from.
	config: TestConfig,
	/// The local node's validator info, if any.
	local: Option<TestLocalValidator>,
	/// Key pairs of all active validators, indexed by validator index.
	validators: Vec<ValidatorPair>,
	/// Session info served to the subsystem by the mocked runtime API.
	session_info: SessionInfo,
	/// Sender used to inject incoming attested-candidate requests.
	req_sender: async_channel::Sender<sc_network::config::IncomingRequest>,
	/// Node features reported by the mocked runtime API.
	node_features: NodeFeatures,
}
impl TestState {
	/// Builds a `TestState` from `config`: generates validator, assignment and
	/// authority-discovery keys and partitions the validators sequentially into
	/// groups of `config.group_size`.
	///
	/// # Panics
	///
	/// Panics if `config.group_size` is zero.
	fn from_config(
		config: TestConfig,
		req_sender: async_channel::Sender<sc_network::config::IncomingRequest>,
		rng: &mut impl Rng,
	) -> Self {
		if config.group_size == 0 {
			panic!("group size cannot be 0");
		}

		let mut validators = Vec::new();
		let mut discovery_keys = Vec::new();
		let mut assignment_keys = Vec::new();
		let mut validator_groups = Vec::new();

		let local_validator_pos = if let LocalRole::Validator = config.local_validator {
			// ensure local validator is always in a full group.
			Some(rng.gen_range(0..config.validator_count).saturating_sub(config.group_size - 1))
		} else {
			None
		};

		for i in 0..config.validator_count {
			let validator_pair = if Some(i) == local_validator_pos {
				// Note: the specific key is used to ensure the keystore holds
				// this key and the subsystem can detect that it is a validator.
				Sr25519Keyring::Ferdie.pair().into()
			} else {
				ValidatorPair::generate().0
			};
			let assignment_id = AssignmentPair::generate().0.public();
			let discovery_id = AuthorityDiscoveryPair::generate().0.public();

			// Groups are formed sequentially: a new group opens every
			// `group_size` validators.
			let group_index = i / config.group_size;
			validators.push(validator_pair);
			discovery_keys.push(discovery_id);
			assignment_keys.push(assignment_id);

			if validator_groups.len() == group_index {
				validator_groups.push(vec![ValidatorIndex(i as _)]);
			} else {
				validator_groups.last_mut().unwrap().push(ValidatorIndex(i as _));
			}
		}

		let local = match (config.local_validator, local_validator_pos) {
			(LocalRole::Validator, Some(local_pos)) => Some(TestLocalValidator {
				validator_index: ValidatorIndex(local_pos as _),
				group_index: Some(GroupIndex((local_pos / config.group_size) as _)),
			}),
			(LocalRole::InactiveValidator, None) => {
				// An inactive validator is known via authority discovery but is
				// not in the active set, hence it gets no group.
				discovery_keys.push(AuthorityDiscoveryPair::generate().0.public());
				Some(TestLocalValidator {
					validator_index: ValidatorIndex(config.validator_count as u32),
					group_index: None,
				})
			},
			_ => None,
		};

		let validator_public = validator_pubkeys(&validators);
		let session_info = SessionInfo {
			validators: validator_public,
			discovery_keys,
			validator_groups: IndexedVec::from(validator_groups),
			assignment_keys,
			n_cores: 0,
			zeroth_delay_tranche_width: 0,
			relay_vrf_modulo_samples: 0,
			n_delay_tranches: 0,
			no_show_slots: 0,
			needed_approvals: 0,
			active_validator_indices: vec![],
			dispute_period: 6,
			random_seed: [0u8; 32],
		};

		let mut node_features = NodeFeatures::new();
		if config.allow_v2_descriptors {
			node_features.resize(FeatureIndex::FirstUnassigned as usize, false);
			node_features.set(FeatureIndex::CandidateReceiptV2 as usize, true);
		}

		TestState { config, local, validators, session_info, req_sender, node_features }
	}

	/// Creates a leaf with one group assigned to para 0 and the default
	/// scheduling lookahead.
	fn make_dummy_leaf(&self, relay_parent: Hash) -> TestLeaf {
		self.make_dummy_leaf_inner(relay_parent, 1, DEFAULT_SCHEDULING_LOOKAHEAD as usize)
	}

	/// Creates a leaf where the first `groups_for_first_para` groups are all
	/// assigned to para 0 and every other group `i` to para `i`, filling each
	/// core's claim queue `scheduling_lookahead` entries deep.
	fn make_dummy_leaf_inner(
		&self,
		relay_parent: Hash,
		groups_for_first_para: usize,
		scheduling_lookahead: usize,
	) -> TestLeaf {
		let mut cq = std::collections::BTreeMap::new();
		for i in 0..self.session_info.validator_groups.len() {
			if i < groups_for_first_para {
				cq.entry(CoreIndex(i as u32)).or_insert_with(|| {
					std::iter::repeat(ParaId::from(0u32)).take(scheduling_lookahead).collect()
				});
			} else {
				cq.entry(CoreIndex(i as u32)).or_insert_with(|| {
					std::iter::repeat(ParaId::from(i)).take(scheduling_lookahead).collect()
				});
			};
		}

		TestLeaf {
			number: 1,
			hash: relay_parent,
			parent_hash: Hash::repeat_byte(0),
			session: 1,
			disabled_validators: Default::default(),
			para_data: (0..self.session_info.validator_groups.len())
				.map(|i| {
					// Mirror the para assignment used for the claim queue above.
					let para_id = if i < groups_for_first_para {
						ParaId::from(0u32)
					} else {
						ParaId::from(i as u32)
					};

					(para_id, PerParaData::new(1, vec![1, 2, 3].into()))
				})
				.collect(),
			minimum_backing_votes: 2,
			claim_queue: ClaimQueueSnapshot(cq),
		}
	}

	/// Creates a dummy leaf with a custom scheduling lookahead.
	fn make_dummy_leaf_with_scheduling_lookahead(
		&self,
		relay_parent: Hash,
		scheduling_lookahead: usize,
	) -> TestLeaf {
		self.make_dummy_leaf_inner(relay_parent, 1, scheduling_lookahead)
	}

	/// Creates a dummy leaf where para 0 is assigned to multiple cores.
	fn make_dummy_leaf_with_multiple_cores_per_para(
		&self,
		relay_parent: Hash,
		groups_for_first_para: usize,
	) -> TestLeaf {
		self.make_dummy_leaf_inner(
			relay_parent,
			groups_for_first_para,
			DEFAULT_SCHEDULING_LOOKAHEAD as usize,
		)
	}

	/// Creates a dummy leaf reporting the given validators as disabled.
	fn make_dummy_leaf_with_disabled_validators(
		&self,
		relay_parent: Hash,
		disabled_validators: Vec<ValidatorIndex>,
	) -> TestLeaf {
		TestLeaf { disabled_validators, ..self.make_dummy_leaf(relay_parent) }
	}

	/// Creates a dummy leaf with a custom minimum-backing-votes threshold.
	fn make_dummy_leaf_with_min_backing_votes(
		&self,
		relay_parent: Hash,
		minimum_backing_votes: u32,
	) -> TestLeaf {
		TestLeaf { minimum_backing_votes, ..self.make_dummy_leaf(relay_parent) }
	}

	/// Creates a gossip topology covering all active validators (plus the local
	/// inactive validator, if configured), with no peer ids attached.
	fn make_dummy_topology(&self) -> NewGossipTopology {
		let validator_count = self.config.validator_count;

		let is_local_inactive = matches!(self.config.local_validator, LocalRole::InactiveValidator);
		let mut indices: Vec<usize> = (0..validator_count).collect();
		if is_local_inactive {
			// The inactive validator occupies the index one past the active set.
			indices.push(validator_count);
		}

		NewGossipTopology {
			session: 1,
			topology: SessionGridTopology::new(
				indices.clone(),
				indices
					.into_iter()
					.map(|i| TopologyPeerInfo {
						peer_ids: Vec::new(),
						validator_index: ValidatorIndex(i as u32),
						discovery_id: self.session_info.discovery_keys[i].clone(),
					})
					.collect(),
			),
			local_index: self.local.as_ref().map(|local| local.validator_index),
		}
	}

	/// Returns the validator indices of `group_index`, optionally excluding the
	/// local validator.
	///
	/// # Panics
	///
	/// Panics if `group_index` is out of range.
	fn group_validators(
		&self,
		group_index: GroupIndex,
		exclude_local: bool,
	) -> Vec<ValidatorIndex> {
		self.session_info
			.validator_groups
			.get(group_index)
			.unwrap()
			.iter()
			.cloned()
			.filter(|&i| {
				self.local.as_ref().map_or(true, |l| !exclude_local || l.validator_index != i)
			})
			.collect()
	}

	/// Returns the position of `validator_index` within `group_index`, if the
	/// validator belongs to that group.
	fn index_within_group(
		&self,
		group_index: GroupIndex,
		validator_index: ValidatorIndex,
	) -> Option<usize> {
		self.session_info
			.validator_groups
			.get(group_index)
			.unwrap()
			.iter()
			.position(|&i| i == validator_index)
	}

	/// Returns the authority-discovery id of `validator_index`.
	fn discovery_id(&self, validator_index: ValidatorIndex) -> AuthorityDiscoveryId {
		self.session_info.discovery_keys[validator_index.0 as usize].clone()
	}

	/// Signs a compact statement with the key of `validator_index`.
	fn sign_statement(
		&self,
		validator_index: ValidatorIndex,
		statement: CompactStatement,
		context: &SigningContext,
	) -> SignedStatement {
		let payload = statement.signing_payload(context);
		let pair = &self.validators[validator_index.0 as usize];
		let signature = pair.sign(&payload[..]);

		SignedStatement::new(statement, validator_index, signature, context, &pair.public())
			.unwrap()
	}

	/// Signs a full statement (with persisted validation data attached) with
	/// the key of `validator_index`.
	fn sign_full_statement(
		&self,
		validator_index: ValidatorIndex,
		statement: Statement,
		context: &SigningContext,
		pvd: PersistedValidationData,
	) -> SignedFullStatementWithPVD {
		let payload = statement.to_compact().signing_payload(context);
		let pair = &self.validators[validator_index.0 as usize];
		let signature = pair.sign(&payload[..]);

		SignedFullStatementWithPVD::new(
			statement.supply_pvd(pvd),
			validator_index,
			signature,
			context,
			&pair.public(),
		)
		.unwrap()
	}

	// send a request out, returning a future which expects a response.
	/// Injects an incoming `AttestedCandidateRequest` from `peer` into the
	/// subsystem, returning a future that resolves to the subsystem's response
	/// (or `None` if the response channel is dropped).
	async fn send_request(
		&mut self,
		peer: PeerId,
		request: AttestedCandidateRequest,
	) -> impl Future<Output = Option<sc_network::config::OutgoingResponse>> {
		let (tx, rx) = futures::channel::oneshot::channel();
		let req = sc_network::config::IncomingRequest {
			peer,
			payload: request.encode(),
			pending_response: tx,
		};
		self.req_sender.send(req).await.unwrap();

		rx.map(|r| r.ok())
	}
}
/// Spawns the statement-distribution subsystem configured via `config` and
/// runs `test` against it, driving both futures to completion on a local
/// executor.
///
/// Panics if the test leaves unhandled subsystem messages behind, or if the
/// subsystem itself exits with a fatal error.
fn test_harness<T: Future<Output = VirtualOverseer>>(
	config: TestConfig,
	test: impl FnOnce(TestState, VirtualOverseer) -> T,
) {
	let pool = sp_core::testing::TaskExecutor::new();
	// Only an active validator gets a keystore holding Ferdie's key — the same
	// key `TestState::from_config` assigns to the local validator position.
	let keystore = if let LocalRole::Validator = config.local_validator {
		test_helpers::mock::make_ferdie_keystore()
	} else {
		Arc::new(LocalKeystore::in_memory()) as KeystorePtr
	};
	let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
	let (candidate_req_receiver, req_cfg) = IncomingRequest::get_config_receiver::<
		Block,
		sc_network::NetworkWorker<Block, Hash>,
	>(&req_protocol_names);
	// Fixed seed keeps validator/group layout deterministic across runs.
	let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(0);

	let test_state = TestState::from_config(config, req_cfg.inbound_queue.unwrap(), &mut rng);

	let (context, virtual_overseer) =
		pezkuwi_node_subsystem_test_helpers::make_subsystem_context(pool.clone());
	let subsystem = async move {
		let subsystem = crate::StatementDistributionSubsystem {
			keystore,
			req_receiver: Some(candidate_req_receiver),
			metrics: Default::default(),
			reputation: ReputationAggregator::new(|_| true),
		};

		if let Err(e) = subsystem.run(context).await {
			panic!("Fatal error: {:?}", e);
		}
	};

	let test_fut = test(test_state, virtual_overseer);

	futures::pin_mut!(test_fut);
	futures::pin_mut!(subsystem);
	futures::executor::block_on(future::join(
		async move {
			let mut virtual_overseer = test_fut.await;
			// Ensure we have handled all responses.
			if let Ok(Some(msg)) = virtual_overseer.rx.try_next() {
				panic!("Did not handle all responses: {:?}", msg);
			}
			// Conclude.
			virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
		},
		subsystem,
	));
}
/// Per-parachain data used when answering leaf-activation queries.
struct PerParaData {
	/// Minimum relay-parent block number allowed for this para.
	min_relay_parent: BlockNumber,
	/// Current head data of the para.
	head_data: HeadData,
}
impl PerParaData {
pub fn new(min_relay_parent: BlockNumber, head_data: HeadData) -> Self {
Self { min_relay_parent, head_data }
}
}
/// A test leaf together with the mocked runtime state behind it.
struct TestLeaf {
	/// Block number of the leaf.
	number: BlockNumber,
	/// Block hash of the leaf.
	hash: Hash,
	/// Hash of the leaf's parent block.
	parent_hash: Hash,
	/// Session the leaf belongs to.
	session: SessionIndex,
	/// Validators reported as disabled at this leaf.
	pub disabled_validators: Vec<ValidatorIndex>,
	/// Per-parachain data, keyed by para id.
	para_data: Vec<(ParaId, PerParaData)>,
	/// Minimum number of backing votes reported by the runtime.
	minimum_backing_votes: u32,
	/// Claim-queue snapshot reported by the runtime.
	claim_queue: ClaimQueueSnapshot,
}
impl TestLeaf {
	/// Looks up the per-parachain test data registered for `para_id`.
	///
	/// # Panics
	///
	/// Panics if the leaf holds no entry for `para_id` — an `expect` message
	/// names the failure so test breakage is easier to diagnose.
	pub fn para_data(&self, para_id: ParaId) -> &PerParaData {
		self.para_data
			.iter()
			.find(|(p_id, _)| *p_id == para_id)
			.map(|(_, data)| data)
			.expect("TestLeaf::para_data: no entry registered for requested ParaId")
	}
}
/// Common test setup produced by [`setup_test_and_connect_peers`].
struct TestSetupInfo {
	/// The local validator's info.
	local_validator: TestLocalValidator,
	/// The group the local validator belongs to.
	local_group: GroupIndex,
	/// The para assigned to the local group.
	local_para: ParaId,
	/// The next (non-local) group.
	other_group: GroupIndex,
	/// The para assigned to the other group.
	other_para: ParaId,
	/// The relay parent used for the activated leaf.
	relay_parent: Hash,
	/// The activated test leaf.
	test_leaf: TestLeaf,
	/// Peer ids of the connected peers, in connection order.
	peers: Vec<PeerId>,
	/// Validator indices corresponding to `peers`, in the same order.
	validators: Vec<ValidatorIndex>,
}
/// Describes one peer to connect in [`setup_test_and_connect_peers`].
struct TestPeerToConnect {
	/// Whether the peer is drawn from the local group (otherwise the other group).
	local: bool,
	/// Whether the peer's view should already contain the test relay parent.
	relay_parent_in_view: bool,
}
// TODO: Generalize, use in more places.
/// Sets up some test info that is common to most tests, and connects the requested peers.
///
/// Activates a single leaf, sends the gossip topology (before or after the
/// leaf, per `send_topology_before_leaf`) and connects `peers_to_connect`,
/// assigning each peer the next unused validator from either the local or the
/// other group.
///
/// # Panics
///
/// Panics if the local node is not an active validator, or if more peers are
/// requested from a group than it has members (excluding the local validator).
async fn setup_test_and_connect_peers(
	state: &TestState,
	overseer: &mut VirtualOverseer,
	validator_count: usize,
	group_size: usize,
	peers_to_connect: &[TestPeerToConnect],
	send_topology_before_leaf: bool,
) -> TestSetupInfo {
	let local_validator = state.local.clone().unwrap();
	let local_group = local_validator.group_index.unwrap();
	let local_para = ParaId::from(local_group.0);

	let other_group = next_group_index(local_group, validator_count, group_size);
	let other_para = ParaId::from(other_group.0);

	let relay_parent = Hash::repeat_byte(1);
	let test_leaf = state.make_dummy_leaf(relay_parent);

	// Because we are testing grid mod, the "target" group (the one we communicate with) is usually
	// other_group, a non-local group.
	//
	// TODO: change based on `LocalRole`?
	let local_group_validators = state.group_validators(local_group, true);
	let other_group_validators = state.group_validators(other_group, true);

	let mut peers = vec![];
	let mut validators = vec![];
	let mut local_group_idx = 0;
	let mut other_group_idx = 0;
	for peer_to_connect in peers_to_connect {
		let peer = PeerId::random();
		peers.push(peer);

		// Take the next unused validator from the requested group.
		let v = if peer_to_connect.local {
			let v = local_group_validators[local_group_idx];
			local_group_idx += 1;
			v
		} else {
			let v = other_group_validators[other_group_idx];
			other_group_idx += 1;
			v
		};
		validators.push(v);

		connect_peer(overseer, peer, Some(vec![state.discovery_id(v)].into_iter().collect())).await;

		if peer_to_connect.relay_parent_in_view {
			send_peer_view_change(overseer, peer.clone(), view![relay_parent]).await;
		}
	}

	// Send gossip topology and activate leaf.
	if send_topology_before_leaf {
		send_new_topology(overseer, state.make_dummy_topology()).await;
		// Send cleaning up of a leaf to make sure it does not clear the save topology as well.
		overseer
			.send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(
				ActiveLeavesUpdate::stop_work(Hash::random()),
			)))
			.await;
		activate_leaf(overseer, &test_leaf, &state, true, vec![]).await;
	} else {
		activate_leaf(overseer, &test_leaf, &state, true, vec![]).await;
		send_new_topology(overseer, state.make_dummy_topology()).await;
	}

	TestSetupInfo {
		local_validator,
		local_group,
		local_para,
		other_group,
		other_para,
		test_leaf,
		relay_parent,
		peers,
		validators,
	}
}
/// Activates `leaf` via an `ActiveLeaves` signal, then drives the subsystem
/// through every query triggered by the activation (see
/// `handle_leaf_activation`).
async fn activate_leaf(
	virtual_overseer: &mut VirtualOverseer,
	leaf: &TestLeaf,
	test_state: &TestState,
	is_new_session: bool,
	hypothetical_memberships: Vec<(HypotheticalCandidate, HypotheticalMembership)>,
) {
	let update = ActiveLeavesUpdate::start_work(new_leaf(leaf.hash, leaf.number));
	virtual_overseer
		.send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)))
		.await;

	handle_leaf_activation(
		virtual_overseer,
		leaf,
		test_state,
		is_new_session,
		hypothetical_memberships,
	)
	.await;
}
/// Answers all chain-API, runtime-API and prospective-chains queries the
/// subsystem makes while processing the activation of `leaf`.
///
/// Session-scoped queries (`SessionInfo`, `MinimumBackingVotes`) are only
/// expected — and asserted — when `is_new_session` is set. The loop ends after
/// answering `GetHypotheticalMembership`, the last query of a leaf activation.
async fn handle_leaf_activation(
	virtual_overseer: &mut VirtualOverseer,
	leaf: &TestLeaf,
	test_state: &TestState,
	is_new_session: bool,
	hypothetical_memberships: Vec<(HypotheticalCandidate, HypotheticalMembership)>,
) {
	let TestLeaf {
		number,
		hash,
		parent_hash,
		para_data,
		session,
		disabled_validators,
		minimum_backing_votes,
		claim_queue,
	} = leaf;

	let header = Header {
		parent_hash: *parent_hash,
		number: *number,
		state_root: Hash::zero(),
		extrinsics_root: Hash::zero(),
		digest: Default::default(),
	};
	// First the subsystem fetches the leaf's header...
	assert_matches!(
		virtual_overseer.recv().await,
		AllMessages::ChainApi(
			ChainApiMessage::BlockHeader(parent, tx)
		) if parent == *hash => {
			tx.send(Ok(Some(header))).unwrap();
		}
	);

	// ...then the per-para minimum relay parents for the implicit view.
	let mrp_response: Vec<(ParaId, BlockNumber)> = para_data
		.iter()
		.map(|(para_id, data)| (*para_id, data.min_relay_parent))
		.collect();
	assert_matches!(
		virtual_overseer.recv().await,
		AllMessages::ProspectiveTeyrchains(
			ProspectiveTeyrchainsMessage::GetMinimumRelayParents(parent, tx)
		) if parent == *hash => {
			tx.send(mrp_response).unwrap();
		}
	);

	// The remaining queries may arrive in varying order, so answer from a loop
	// until the terminating `GetHypotheticalMembership` request is seen.
	loop {
		match virtual_overseer.recv().await {
			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
				_parent,
				RuntimeApiRequest::Version(tx),
			)) => {
				tx.send(Ok(RuntimeApiRequest::CLAIM_QUEUE_RUNTIME_REQUIREMENT)).unwrap();
			},
			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
				parent,
				RuntimeApiRequest::DisabledValidators(tx),
			)) if parent == *hash => {
				tx.send(Ok(disabled_validators.clone())).unwrap();
			},
			// Disabled-validator queries for other relay parents report none.
			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
				_parent,
				RuntimeApiRequest::DisabledValidators(tx),
			)) => {
				tx.send(Ok(Vec::new())).unwrap();
			},
			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
				_parent, // assume all active leaves are in the same session
				RuntimeApiRequest::SessionIndexForChild(tx),
			)) => {
				tx.send(Ok(*session)).unwrap();
			},
			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
				parent,
				RuntimeApiRequest::SessionInfo(s, tx),
			)) if parent == *hash && s == *session => {
				assert!(is_new_session, "only expecting this call in a new session");
				tx.send(Ok(Some(test_state.session_info.clone()))).unwrap();
			},
			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
				parent,
				RuntimeApiRequest::MinimumBackingVotes(session_index, tx),
			)) if parent == *hash && session_index == *session => {
				assert!(is_new_session, "only expecting this call in a new session");
				tx.send(Ok(*minimum_backing_votes)).unwrap();
			},
			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
				parent,
				RuntimeApiRequest::ValidatorGroups(tx),
			)) if parent == *hash => {
				let validator_groups = test_state.session_info.validator_groups.to_vec();
				let group_rotation_info = GroupRotationInfo {
					session_start_block: 1,
					group_rotation_frequency: 12,
					now: 1,
				};
				tx.send(Ok((validator_groups, group_rotation_info))).unwrap();
			},
			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
				parent,
				RuntimeApiRequest::NodeFeatures(_session_index, tx),
			)) if parent == *hash => {
				tx.send(Ok(test_state.node_features.clone())).unwrap();
			},
			AllMessages::RuntimeApi(RuntimeApiMessage::Request(
				parent,
				RuntimeApiRequest::ClaimQueue(tx),
			)) if parent == *hash => {
				tx.send(Ok(claim_queue.0.clone())).unwrap();
			},
			AllMessages::ProspectiveTeyrchains(
				ProspectiveTeyrchainsMessage::GetHypotheticalMembership(req, tx),
			) => {
				assert_eq!(req.fragment_chain_relay_parent, Some(*hash));
				// Every expected candidate must have actually been requested.
				for (i, (candidate, _)) in hypothetical_memberships.iter().enumerate() {
					assert!(
						req.candidates.iter().any(|c| &c == &candidate),
						"did not receive request for hypothetical candidate {}",
						i,
					);
				}
				tx.send(hypothetical_memberships).unwrap();
				// this is the last expected runtime api call
				break;
			},
			msg => panic!("unexpected runtime API call: {msg:?}"),
		}
	}
}
/// Intercepts an outgoing request, checks the fields, and sends the response.
///
/// Expects exactly one `AttestedCandidateV2` request addressed to `peer` for
/// `candidate_hash` with the given statement `mask`, and answers it with the
/// provided receipt, validation data and statements.
async fn handle_sent_request(
	virtual_overseer: &mut VirtualOverseer,
	peer: PeerId,
	candidate_hash: CandidateHash,
	mask: StatementFilter,
	candidate_receipt: CommittedCandidateReceipt,
	persisted_validation_data: PersistedValidationData,
	statements: Vec<UncheckedSignedStatement>,
) {
	assert_matches!(
		virtual_overseer.recv().await,
		AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendRequests(mut requests, IfDisconnected::ImmediateError)) => {
			assert_eq!(requests.len(), 1);

			assert_matches!(
				requests.pop().unwrap(),
				Requests::AttestedCandidateV2(outgoing) => {
					assert_eq!(outgoing.peer, Recipient::Peer(peer));
					assert_eq!(outgoing.payload.candidate_hash, candidate_hash);
					assert_eq!(outgoing.payload.mask, mask);

					let res = AttestedCandidateResponse {
						candidate_receipt,
						persisted_validation_data,
						statements,
					};
					outgoing.pending_response.send(Ok((res.encode(), ProtocolName::from("")))).unwrap();
				}
			);
		}
	);
}
/// Answers a leaf-independent `GetHypotheticalMembership` request (one with no
/// fragment-chain relay parent) with `responses`, asserting that each
/// response's candidate was actually requested.
async fn answer_expected_hypothetical_membership_request(
	virtual_overseer: &mut VirtualOverseer,
	responses: Vec<(HypotheticalCandidate, HypotheticalMembership)>,
) {
	assert_matches!(
		virtual_overseer.recv().await,
		AllMessages::ProspectiveTeyrchains(
			ProspectiveTeyrchainsMessage::GetHypotheticalMembership(req, tx)
		) => {
			assert_eq!(req.fragment_chain_relay_parent, None);
			for (i, (candidate, _)) in responses.iter().enumerate() {
				assert!(
					req.candidates.iter().any(|c| &c == &candidate),
					"did not receive request for hypothetical candidate {}",
					i,
				);
			}
			tx.send(responses).unwrap();
		}
	)
}
/// Assert that the correct peer is reported.
///
/// Expects the next overseer message to be a single `ReportPeer` for
/// `$peer_id` carrying the reputation change `$rep_change`.
#[macro_export]
macro_rules! assert_peer_reported {
	($virtual_overseer:expr, $peer_id:expr, $rep_change:expr $(,)*) => {
		assert_matches!(
			$virtual_overseer.recv().await,
			AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r)))
			if p == $peer_id && r == $rep_change.into()
		);
	}
}
/// Injects a `Share` message for `relay_parent` into the subsystem under test.
async fn send_share_message(
	virtual_overseer: &mut VirtualOverseer,
	relay_parent: Hash,
	statement: SignedFullStatementWithPVD,
) {
	let msg = StatementDistributionMessage::Share(relay_parent, statement);
	virtual_overseer.send(FromOrchestra::Communication { msg }).await;
}
/// Notifies the subsystem that `candidate_hash` has become backed.
async fn send_backed_message(
	virtual_overseer: &mut VirtualOverseer,
	candidate_hash: CandidateHash,
) {
	let msg = StatementDistributionMessage::Backed(candidate_hash);
	virtual_overseer.send(FromOrchestra::Communication { msg }).await;
}
/// Delivers a `BackedCandidateManifest` network message as if sent by `peer_id`.
async fn send_manifest_from_peer(
	virtual_overseer: &mut VirtualOverseer,
	peer_id: PeerId,
	manifest: BackedCandidateManifest,
) {
	let msg = protocol_v3::StatementDistributionMessage::BackedCandidateManifest(manifest);
	send_peer_message(virtual_overseer, peer_id, msg).await;
}
/// Delivers a `BackedCandidateKnown` acknowledgement as if sent by `peer_id`.
async fn send_ack_from_peer(
	virtual_overseer: &mut VirtualOverseer,
	peer_id: PeerId,
	ack: BackedCandidateAcknowledgement,
) {
	let msg = protocol_v3::StatementDistributionMessage::BackedCandidateKnown(ack);
	send_peer_message(virtual_overseer, peer_id, msg).await;
}
/// Derives the session's public validator keys from the given key pairs,
/// preserving order.
fn validator_pubkeys(val_ids: &[ValidatorPair]) -> IndexedVec<ValidatorIndex, ValidatorId> {
	let mut keys: Vec<ValidatorId> = Vec::with_capacity(val_ids.len());
	for pair in val_ids {
		keys.push(pair.public().into());
	}
	keys.into_iter().collect()
}
/// Simulates `peer` connecting on the V3 validation protocol with the
/// `Authority` observed role, optionally advertising authority-discovery ids.
async fn connect_peer(
	virtual_overseer: &mut VirtualOverseer,
	peer: PeerId,
	authority_ids: Option<HashSet<AuthorityDiscoveryId>>,
) {
	let event = NetworkBridgeEvent::PeerConnected(
		peer,
		ObservedRole::Authority,
		ValidationVersion::V3.into(),
		authority_ids,
	);
	virtual_overseer
		.send(FromOrchestra::Communication {
			msg: StatementDistributionMessage::NetworkBridgeUpdate(event),
		})
		.await;
}
// TODO: Add some tests using this?
/// Simulates `peer` disconnecting from the network bridge.
#[allow(dead_code)]
async fn disconnect_peer(virtual_overseer: &mut VirtualOverseer, peer: PeerId) {
	let event = NetworkBridgeEvent::PeerDisconnected(peer);
	virtual_overseer
		.send(FromOrchestra::Communication {
			msg: StatementDistributionMessage::NetworkBridgeUpdate(event),
		})
		.await;
}
/// Simulates `peer` announcing an updated `view`.
async fn send_peer_view_change(virtual_overseer: &mut VirtualOverseer, peer: PeerId, view: View) {
	let event = NetworkBridgeEvent::PeerViewChange(peer, view);
	virtual_overseer
		.send(FromOrchestra::Communication {
			msg: StatementDistributionMessage::NetworkBridgeUpdate(event),
		})
		.await;
}
/// Delivers a v3 statement-distribution protocol message as if sent by `peer`.
async fn send_peer_message(
	virtual_overseer: &mut VirtualOverseer,
	peer: PeerId,
	message: protocol_v3::StatementDistributionMessage,
) {
	let event = NetworkBridgeEvent::PeerMessage(peer, ValidationProtocols::V3(message));
	virtual_overseer
		.send(FromOrchestra::Communication {
			msg: StatementDistributionMessage::NetworkBridgeUpdate(event),
		})
		.await;
}
/// Delivers a new gossip `topology` to the subsystem.
async fn send_new_topology(virtual_overseer: &mut VirtualOverseer, topology: NewGossipTopology) {
	let event = NetworkBridgeEvent::NewGossipTopology(topology);
	virtual_overseer
		.send(FromOrchestra::Communication {
			msg: StatementDistributionMessage::NetworkBridgeUpdate(event),
		})
		.await;
}
/// Receives the next overseer message, giving up and returning `None` after
/// `timeout`.
async fn overseer_recv_with_timeout(
	overseer: &mut VirtualOverseer,
	timeout: Duration,
) -> Option<AllMessages> {
	gum::trace!("waiting for message...");
	let recv_fut = overseer.recv();
	recv_fut.timeout(timeout).await
}
/// Returns the group index following `group_index`, wrapping around to group 0
/// after the last group.
///
/// The number of groups is derived from `validator_count` and `group_size`;
/// a trailing, partially-filled group counts as a group.
fn next_group_index(
	group_index: GroupIndex,
	validator_count: usize,
	group_size: usize,
) -> GroupIndex {
	let next_group = group_index.0 + 1;
	// Ceiling division replaces the manual `/ + remainder` dance.
	let num_groups = validator_count.div_ceil(group_size);
	GroupIndex::from(next_group % num_groups as u32)
}
File diff suppressed because it is too large Load Diff