feat: initialize Kurdistan SDK - independent fork of Polkadot SDK

This commit is contained in:
2025-12-13 15:44:15 +03:00
commit e4778b4576
6838 changed files with 1847450 additions and 0 deletions
@@ -0,0 +1,878 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! The Approval Voting Parallel Subsystem.
//!
//! This subsystem is responsible for orchestrating the work done by
//! approval-voting and approval-distribution subsystem, so they can
//! do their work in parallel, rather than serially, when they are run
//! as independent subsystems.
use itertools::Itertools;
use metrics::{Meters, MetricsWatcher};
use pezkuwi_node_core_approval_voting::{Config, RealAssignmentCriteria};
use pezkuwi_node_metrics::metered::{
self, channel, unbounded, MeteredReceiver, MeteredSender, UnboundedMeteredReceiver,
UnboundedMeteredSender,
};
use pezkuwi_node_primitives::{
approval::time::{Clock, SystemClock},
DISPUTE_WINDOW,
};
use pezkuwi_node_subsystem::{
messages::{ApprovalDistributionMessage, ApprovalVotingMessage, ApprovalVotingParallelMessage},
overseer, FromOrchestra, SpawnedSubsystem, SubsystemError, SubsystemResult,
};
use pezkuwi_node_subsystem_util::{
self,
database::Database,
runtime::{Config as RuntimeInfoConfig, RuntimeInfo},
};
use pezkuwi_overseer::{OverseerSignal, Priority, SubsystemSender, TimeoutExt};
use pezkuwi_primitives::{CandidateIndex, Hash, ValidatorIndex, ValidatorSignature};
use rand::SeedableRng;
use sc_keystore::LocalKeystore;
use sp_consensus::SyncOracle;
use futures::{channel::oneshot, prelude::*, StreamExt};
pub use metrics::Metrics;
use pezkuwi_node_core_approval_voting::{
approval_db::common::Config as DatabaseConfig, ApprovalVotingWorkProvider,
};
use std::{
collections::{HashMap, HashSet},
fmt::Debug,
sync::Arc,
time::Duration,
};
use stream::{select_with_strategy, PollNext, SelectWithStrategy};
pub mod metrics;
#[cfg(test)]
mod tests;
/// Log target used by this subsystem and the workers it spawns.
pub(crate) const LOG_TARGET: &str = "teyrchain::approval-voting-parallel";

// Value chosen rather arbitrarily: it should not be hit in practice; it exists, for example, to
// make dead-lock issues easier to diagnose.
const WAIT_FOR_SIGS_GATHER_TIMEOUT: Duration = Duration::from_millis(2000);

/// The number of workers used for running the approval-distribution logic.
pub const APPROVAL_DISTRIBUTION_WORKER_COUNT: usize = 4;

/// The default channel size for the workers, can be overridden by the user through
/// `overseer_channel_capacity_override`
pub const DEFAULT_WORKERS_CHANNEL_SIZE: usize = 64000 / APPROVAL_DISTRIBUTION_WORKER_COUNT;
/// Polling strategy for `select_with_strategy`: always prefer the right-hand
/// stream (the unbounded receiver, see `from_rx_worker`), so signals and
/// high-priority messages are drained first.
///
/// The explicit `'a` lifetime in the original signature was redundant
/// (clippy::needless_lifetimes); elision produces the identical type.
fn prio_right(_val: &mut ()) -> PollNext {
	PollNext::Right
}
/// The approval voting parallel subsystem.
pub struct ApprovalVotingParallelSubsystem {
	/// `LocalKeystore` is needed for assignment keys, but not necessarily approval keys.
	///
	/// We do a lot of VRF signing and need the keys to have low latency.
	keystore: Arc<LocalKeystore>,
	// Database configuration (approval-data column), derived from `Config`.
	db_config: DatabaseConfig,
	// Slot duration in milliseconds, copied from `Config`.
	slot_duration_millis: u64,
	// Handle to the approval-voting database, handed to the voting worker.
	db: Arc<dyn Database>,
	// Sync status oracle, passed through to `start_approval_worker`.
	sync_oracle: Box<dyn SyncOracle + Send>,
	// Metrics covering both the voting and the distribution workers.
	metrics: Metrics,
	// Spawner used for launching the worker tasks.
	spawner: Arc<dyn overseer::gen::Spawner + 'static>,
	// Clock abstraction; tests can inject a mock via `with_config_and_clock`.
	clock: Arc<dyn Clock + Send + Sync>,
	// Optional override for the workers' channel capacity; `None` falls back to
	// `DEFAULT_WORKERS_CHANNEL_SIZE` (see `workers_channel_size`).
	overseer_message_channel_capacity_override: Option<usize>,
}
impl ApprovalVotingParallelSubsystem {
	/// Create a new approval voting subsystem with the given keystore, config, and database.
	pub fn with_config(
		config: Config,
		db: Arc<dyn Database>,
		keystore: Arc<LocalKeystore>,
		sync_oracle: Box<dyn SyncOracle + Send>,
		metrics: Metrics,
		spawner: impl overseer::gen::Spawner + 'static + Clone,
		overseer_message_channel_capacity_override: Option<usize>,
	) -> Self {
		// Delegate to the clock-aware constructor, defaulting to the system clock.
		Self::with_config_and_clock(
			config,
			db,
			keystore,
			sync_oracle,
			metrics,
			Arc::new(SystemClock {}),
			spawner,
			overseer_message_channel_capacity_override,
		)
	}

	/// Create a new approval voting subsystem with the given keystore, config, clock, and database.
	pub fn with_config_and_clock(
		config: Config,
		db: Arc<dyn Database>,
		keystore: Arc<LocalKeystore>,
		sync_oracle: Box<dyn SyncOracle + Send>,
		metrics: Metrics,
		clock: Arc<dyn Clock + Send + Sync>,
		spawner: impl overseer::gen::Spawner + 'static,
		overseer_message_channel_capacity_override: Option<usize>,
	) -> Self {
		let db_config = DatabaseConfig { col_approval_data: config.col_approval_data };
		Self {
			keystore,
			db_config,
			slot_duration_millis: config.slot_duration_millis,
			db,
			sync_oracle,
			metrics,
			spawner: Arc::new(spawner),
			clock,
			overseer_message_channel_capacity_override,
		}
	}

	/// The size of the channel used for the workers: the user-provided override if
	/// present, otherwise `DEFAULT_WORKERS_CHANNEL_SIZE`.
	fn workers_channel_size(&self) -> usize {
		match self.overseer_message_channel_capacity_override {
			Some(capacity) => capacity,
			None => DEFAULT_WORKERS_CHANNEL_SIZE,
		}
	}
}
#[overseer::subsystem(ApprovalVotingParallel, error = SubsystemError, prefix = self::overseer)]
impl<Context: Send> ApprovalVotingParallelSubsystem {
	// Overseer entry point: wraps `run` into a `SpawnedSubsystem`, tagging any
	// resulting error with this subsystem's origin.
	fn start(self, ctx: Context) -> SpawnedSubsystem {
		let future = run::<Context>(ctx, self)
			.map_err(|e| SubsystemError::with_origin("approval-voting-parallel", e))
			.boxed();
		SpawnedSubsystem { name: "approval-voting-parallel-subsystem", future }
	}
}
// It starts a worker for the approval voting subsystem and the
// `APPROVAL_DISTRIBUTION_WORKER_COUNT` workers for the approval distribution subsystem.
//
// It returns handles that can be used to send messages to the workers.
#[overseer::contextbounds(ApprovalVotingParallel, prefix = self::overseer)]
async fn start_workers<Context>(
	ctx: &mut Context,
	subsystem: ApprovalVotingParallelSubsystem,
	metrics_watcher: &mut MetricsWatcher,
) -> SubsystemResult<(ToWorker<ApprovalVotingMessage>, Vec<ToWorker<ApprovalDistributionMessage>>)>
where
{
	gum::info!(target: LOG_TARGET, "Starting approval distribution workers");

	// Build approval voting handles.
	let (to_approval_voting_worker, approval_voting_work_provider) = build_worker_handles(
		"approval-voting-parallel-db".into(),
		subsystem.workers_channel_size(),
		metrics_watcher,
		prio_right,
	);
	let mut to_approval_distribution_workers = Vec::new();
	let slot_duration_millis = subsystem.slot_duration_millis;

	for i in 0..APPROVAL_DISTRIBUTION_WORKER_COUNT {
		// Each worker gets its own senders so it can talk to the network,
		// the runtime API and the approval-voting worker directly.
		let mut network_sender = ctx.sender().clone();
		let mut runtime_api_sender = ctx.sender().clone();
		let mut approval_distribution_to_approval_voting = to_approval_voting_worker.clone();

		let approval_distr_instance =
			pezkuwi_approval_distribution::ApprovalDistribution::new_with_clock(
				subsystem.metrics.approval_distribution_metrics(),
				subsystem.slot_duration_millis,
				subsystem.clock.clone(),
				Arc::new(RealAssignmentCriteria {}),
			);
		let task_name = format!("approval-voting-parallel-{}", i);
		let (to_approval_distribution_worker, mut approval_distribution_work_provider) =
			build_worker_handles(
				task_name.clone(),
				subsystem.workers_channel_size(),
				metrics_watcher,
				prio_right,
			);
		// Fix: removed a redundant `metrics_watcher.watch(task_name, ...)` call here —
		// `build_worker_handles` (via `build_channels`) already registers this worker's
		// meters under the same name, so the extra call only re-inserted the same entry.

		subsystem.spawner.spawn_blocking(
			task_name.leak(),
			Some("approval-voting-parallel"),
			Box::pin(async move {
				let mut state =
					pezkuwi_approval_distribution::State::with_config(slot_duration_millis);
				let mut rng = rand::rngs::StdRng::from_entropy();
				let mut session_info_provider = RuntimeInfo::new_with_config(RuntimeInfoConfig {
					keystore: None,
					session_cache_lru_size: DISPUTE_WINDOW.get(),
				});

				loop {
					// A `None` from the work provider means both channels were closed.
					let message = match approval_distribution_work_provider.next().await {
						Some(message) => message,
						None => {
							gum::info!(
								target: LOG_TARGET,
								"Approval distribution stream finished, most likely shutting down",
							);
							break;
						},
					};
					// `handle_from_orchestra` returns `true` on `Conclude`.
					if approval_distr_instance
						.handle_from_orchestra(
							message,
							&mut approval_distribution_to_approval_voting,
							&mut network_sender,
							&mut runtime_api_sender,
							&mut state,
							&mut rng,
							&mut session_info_provider,
						)
						.await
					{
						gum::info!(
							target: LOG_TARGET,
							"Approval distribution worker {}, exiting because of shutdown", i
						);
					};
				}
			}),
		);
		to_approval_distribution_workers.push(to_approval_distribution_worker);
	}

	gum::info!(target: LOG_TARGET, "Starting approval voting workers");

	let sender = ctx.sender().clone();
	let to_approval_distribution = ApprovalVotingToApprovalDistribution(sender.clone());

	pezkuwi_node_core_approval_voting::start_approval_worker(
		approval_voting_work_provider,
		sender.clone(),
		to_approval_distribution,
		pezkuwi_node_core_approval_voting::Config {
			slot_duration_millis: subsystem.slot_duration_millis,
			col_approval_data: subsystem.db_config.col_approval_data,
		},
		subsystem.db.clone(),
		subsystem.keystore.clone(),
		subsystem.sync_oracle,
		subsystem.metrics.approval_voting_metrics(),
		subsystem.spawner.clone(),
		"approval-voting-parallel-db",
		"approval-voting-parallel",
		subsystem.clock.clone(),
	)
	.await?;

	Ok((to_approval_voting_worker, to_approval_distribution_workers))
}
// The main run function of the approval parallel voting subsystem.
#[overseer::contextbounds(ApprovalVotingParallel, prefix = self::overseer)]
async fn run<Context>(
	mut ctx: Context,
	subsystem: ApprovalVotingParallelSubsystem,
) -> SubsystemResult<()> {
	// The watcher tracks the channel meters of every worker started below.
	let mut metrics_watcher = MetricsWatcher::new(subsystem.metrics.clone());

	gum::info!(target: LOG_TARGET, "Starting workers");
	let (to_approval_voting_worker, to_approval_distribution_workers) =
		start_workers(&mut ctx, subsystem, &mut metrics_watcher).await?;

	gum::info!(target: LOG_TARGET, "Starting main subsystem loop");
	run_main_loop(ctx, to_approval_voting_worker, to_approval_distribution_workers, metrics_watcher)
		.await
}
// Main loop of the subsystem, it shouldn't include any logic just dispatching of messages to
// the workers.
//
// It listens for messages from the overseer and dispatches them to the workers.
#[overseer::contextbounds(ApprovalVotingParallel, prefix = self::overseer)]
async fn run_main_loop<Context>(
	mut ctx: Context,
	mut to_approval_voting_worker: ToWorker<ApprovalVotingMessage>,
	mut to_approval_distribution_workers: Vec<ToWorker<ApprovalDistributionMessage>>,
	metrics_watcher: MetricsWatcher,
) -> SubsystemResult<()> {
	loop {
		futures::select! {
			next_msg = ctx.recv().fuse() => {
				let next_msg = match next_msg {
					Ok(msg) => msg,
					Err(err) => {
						gum::info!(target: LOG_TARGET, ?err, "Approval voting parallel subsystem received an error");
						return Err(err);
					}
				};
				match next_msg {
					FromOrchestra::Signal(msg) => {
						// Piggy-back metrics collection on `ActiveLeaves`, which arrives
						// regularly; avoids a dedicated timer.
						if matches!(msg, OverseerSignal::ActiveLeaves(_)) {
							metrics_watcher.collect_metrics();
						}
						// Signals are broadcast to all workers (distribution first, then voting).
						for worker in to_approval_distribution_workers.iter_mut() {
							worker
								.send_signal(msg.clone()).await?;
						}
						to_approval_voting_worker.send_signal(msg.clone()).await?;
						// `Conclude` exits the loop after having been forwarded to all workers.
						if matches!(msg, OverseerSignal::Conclude) {
							break;
						}
					},
					FromOrchestra::Communication { msg } => match msg {
						// The message the approval voting subsystem would've handled.
						ApprovalVotingParallelMessage::ApprovedAncestor(_, _,_) |
						ApprovalVotingParallelMessage::GetApprovalSignaturesForCandidate(_, _) => {
							to_approval_voting_worker.send_message_with_priority::<overseer::HighPriority>(
								msg.try_into().expect(
									"Message is one of ApprovedAncestor, GetApprovalSignaturesForCandidate
									and that can be safely converted to ApprovalVotingMessage; qed"
								)
							).await;
						},
						// Now the message the approval distribution subsystem would've handled and need to
						// be forwarded to the workers.
						ApprovalVotingParallelMessage::NewBlocks(msg) => {
							// New-block information is relevant to every worker, so broadcast.
							for worker in to_approval_distribution_workers.iter_mut() {
								worker
									.send_message(
										ApprovalDistributionMessage::NewBlocks(msg.clone()),
									)
									.await;
							}
						},
						ApprovalVotingParallelMessage::DistributeAssignment(assignment, claimed) => {
							// Route by originating validator so all of one validator's messages
							// land on the same worker.
							let worker = assigned_worker_for_validator(assignment.validator, &mut to_approval_distribution_workers);
							worker
								.send_message(
									ApprovalDistributionMessage::DistributeAssignment(assignment, claimed)
								)
								.await;
						},
						ApprovalVotingParallelMessage::DistributeApproval(vote) => {
							// Same per-validator routing as for assignments.
							let worker = assigned_worker_for_validator(vote.validator, &mut to_approval_distribution_workers);
							worker
								.send_message(
									ApprovalDistributionMessage::DistributeApproval(vote)
								).await;
						},
						ApprovalVotingParallelMessage::NetworkBridgeUpdate(msg) => {
							if let pezkuwi_node_subsystem::messages::NetworkBridgeEvent::PeerMessage(
								peer_id,
								msg,
							) = msg
							{
								// Peer messages are split by originating validator (see
								// `validator_index_for_msg`) and routed per validator; exactly one
								// of the two returned options is `Some`.
								let (all_msgs_from_same_validator, messages_split_by_validator) = validator_index_for_msg(msg);
								for (validator_index, msg) in all_msgs_from_same_validator.into_iter().chain(messages_split_by_validator.into_iter().flatten()) {
									let worker = assigned_worker_for_validator(validator_index, &mut to_approval_distribution_workers);
									worker
										.send_message(
											ApprovalDistributionMessage::NetworkBridgeUpdate(
												pezkuwi_node_subsystem::messages::NetworkBridgeEvent::PeerMessage(
													peer_id, msg,
												),
											),
										).await;
								}
							} else {
								// All other bridge events (connect/disconnect/view changes) are
								// broadcast to every worker with high priority.
								for worker in to_approval_distribution_workers.iter_mut() {
									worker
										.send_message_with_priority::<overseer::HighPriority>(
											ApprovalDistributionMessage::NetworkBridgeUpdate(msg.clone()),
										).await;
								}
							}
						},
						ApprovalVotingParallelMessage::GetApprovalSignatures(indices, tx) => {
							// Fan-out/fan-in handled on a separate task; see
							// `handle_get_approval_signatures`.
							handle_get_approval_signatures(&mut ctx, &mut to_approval_distribution_workers, indices, tx).await;
						},
						ApprovalVotingParallelMessage::ApprovalCheckingLagUpdate(lag) => {
							// Lag updates are relevant to every worker, so broadcast.
							for worker in to_approval_distribution_workers.iter_mut() {
								worker
									.send_message(
										ApprovalDistributionMessage::ApprovalCheckingLagUpdate(lag)
									).await;
							}
						},
					},
				};
			},
		};
	}
	Ok(())
}
// It sends a message to all approval workers to get the approval signatures for the requested
// candidates and then merges them all together and sends them back to the requester.
#[overseer::contextbounds(ApprovalVotingParallel, prefix = self::overseer)]
async fn handle_get_approval_signatures<Context>(
	ctx: &mut Context,
	to_approval_distribution_workers: &mut Vec<ToWorker<ApprovalDistributionMessage>>,
	requested_candidates: HashSet<(Hash, CandidateIndex)>,
	result_channel: oneshot::Sender<
		HashMap<ValidatorIndex, (Hash, Vec<CandidateIndex>, ValidatorSignature)>,
	>,
) {
	let mut sigs = HashMap::new();
	let mut signatures_channels = Vec::new();
	// Fan out the request to every worker; each one answers on its own oneshot.
	// Sent unbounded so the request cannot get stuck behind a full bounded queue.
	for worker in to_approval_distribution_workers.iter_mut() {
		let (tx, rx) = oneshot::channel();
		worker.send_unbounded_message(ApprovalDistributionMessage::GetApprovalSignatures(
			requested_candidates.clone(),
			tx,
		));
		signatures_channels.push(rx);
	}
	// Gathering is done on a spawned task (see below) so this subsystem's main loop
	// is never blocked waiting for the workers.
	let gather_signatures = async move {
		// Bounded wait: if a worker never answers we log instead of hanging forever.
		let Some(results) = futures::future::join_all(signatures_channels)
			.timeout(WAIT_FOR_SIGS_GATHER_TIMEOUT)
			.await
		else {
			gum::warn!(
				target: LOG_TARGET,
				"Waiting for approval signatures timed out - dead lock?"
			);
			return;
		};
		// Merge the per-worker maps; a dropped oneshot is logged and skipped.
		for result in results {
			let worker_sigs = match result {
				Ok(sigs) => sigs,
				Err(_) => {
					gum::error!(
						target: LOG_TARGET,
						"Getting approval signatures failed, oneshot got closed"
					);
					continue;
				},
			};
			sigs.extend(worker_sigs);
		}
		// The requester may have gone away in the meantime; that is not fatal.
		if let Err(_) = result_channel.send(sigs) {
			gum::debug!(
				target: LOG_TARGET,
				"Sending back approval signatures failed, oneshot got closed"
			);
		}
	};
	if let Err(err) = ctx.spawn("approval-voting-gather-signatures", Box::pin(gather_signatures)) {
		gum::warn!(target: LOG_TARGET, "Failed to spawn gather signatures task: {:?}", err);
	}
}
// Returns the worker that should receive the message for the given validator.
//
// Routing is a plain modulo over the number of workers, so every message
// originating from one validator always lands on the same worker.
fn assigned_worker_for_validator(
	validator: ValidatorIndex,
	to_approval_distribution_workers: &mut Vec<ToWorker<ApprovalDistributionMessage>>,
) -> &mut ToWorker<ApprovalDistributionMessage> {
	let worker_count = to_approval_distribution_workers.len();
	let worker_index = validator.0 as usize % worker_count;
	to_approval_distribution_workers
		.get_mut(worker_index)
		.expect("Worker index is obtained modulo len; qed")
}
// Returns the validators that initially created this assignments/votes, the validator index
// is later used to decide which approval-distribution worker should receive the message.
//
// Because this is on the hot path and we don't want to be unnecessarily slow, it contains two logic
// paths. The ultra fast path where all messages have the same validator index and we don't do
// any cloning or allocation and the path where we need to split the messages into multiple
// messages, because they have different validator indices, where we do need to clone and allocate.
// In practice most of the message will fall on the ultra fast path.
//
// Exactly one side of the returned tuple is `Some`: the first for the fast path
// (shared validator index + original message untouched), the second for the slow
// path (one single-element message per entry, paired with its validator).
fn validator_index_for_msg(
	msg: pezkuwi_node_network_protocol::ApprovalDistributionMessage,
) -> (
	Option<(ValidatorIndex, pezkuwi_node_network_protocol::ApprovalDistributionMessage)>,
	Option<Vec<(ValidatorIndex, pezkuwi_node_network_protocol::ApprovalDistributionMessage)>>,
) {
	match msg {
		pezkuwi_node_network_protocol::ValidationProtocols::V3(ref message) => match message {
			pezkuwi_node_network_protocol::v3::ApprovalDistributionMessage::Assignments(msgs) =>
			// `all_equal_value` succeeds only when every assignment shares one validator.
				if let Ok(validator) = msgs.iter().map(|(msg, _)| msg.validator).all_equal_value() {
					(Some((validator, msg)), None)
				} else {
					// Mixed validators: rebuild one single-assignment message per entry
					// (this is the only place we clone).
					let split = msgs
						.iter()
						.map(|(msg, claimed_candidates)| {
							(
								msg.validator,
								pezkuwi_node_network_protocol::ValidationProtocols::V3(
									pezkuwi_node_network_protocol::v3::ApprovalDistributionMessage::Assignments(
										vec![(msg.clone(), claimed_candidates.clone())]
									),
								),
							)
						})
						.collect_vec();
					(None, Some(split))
				},
			pezkuwi_node_network_protocol::v3::ApprovalDistributionMessage::Approvals(msgs) =>
			// Same fast-path check for approval votes.
				if let Ok(validator) = msgs.iter().map(|msg| msg.validator).all_equal_value() {
					(Some((validator, msg)), None)
				} else {
					// Mixed validators: rebuild one single-vote message per entry.
					let split = msgs
						.iter()
						.map(|vote| {
							(
								vote.validator,
								pezkuwi_node_network_protocol::ValidationProtocols::V3(
									pezkuwi_node_network_protocol::v3::ApprovalDistributionMessage::Approvals(
										vec![vote.clone()]
									),
								),
							)
						})
						.collect_vec();
					(None, Some(split))
				},
		},
	}
}
/// A handler object that both types of workers use for receiving work.
///
/// In practice this is just a wrapper over two channel `Receiver`s, that is injected into
/// the approval-voting worker and the approval-distribution workers.
type WorkProvider<M, Clos, State> = WorkProviderImpl<
	SelectWithStrategy<
		MeteredReceiver<FromOrchestra<M>>,
		UnboundedMeteredReceiver<FromOrchestra<M>>,
		Clos,
		State,
	>,
>;
/// Newtype over the prioritised stream built by `from_rx_worker`; implements
/// both `Stream` and `ApprovalVotingWorkProvider` on top of it.
pub struct WorkProviderImpl<T>(T);

impl<T, M> Stream for WorkProviderImpl<T>
where
	T: Stream<Item = FromOrchestra<M>> + Unpin + Send,
{
	type Item = FromOrchestra<M>;

	// Pure delegation: `T: Unpin`, so we can poll the inner stream directly.
	fn poll_next(
		mut self: std::pin::Pin<&mut Self>,
		cx: &mut std::task::Context<'_>,
	) -> std::task::Poll<Option<Self::Item>> {
		self.0.poll_next_unpin(cx)
	}
}
#[async_trait::async_trait]
impl<T> ApprovalVotingWorkProvider for WorkProviderImpl<T>
where
	T: Stream<Item = FromOrchestra<ApprovalVotingMessage>> + Unpin + Send,
{
	/// Receive the next piece of work for the approval-voting worker.
	///
	/// Returns an error once the underlying stream is exhausted, i.e. both
	/// channels feeding this provider have closed.
	async fn recv(&mut self) -> SubsystemResult<FromOrchestra<ApprovalVotingMessage>> {
		// `ok_or_else` instead of `ok_or`: the latter allocated the error `String`
		// eagerly on every call, even on the (common) success path.
		self.0.next().await.ok_or_else(|| {
			SubsystemError::Context("ApprovalVotingWorkProviderImpl: Channel closed".to_string())
		})
	}
}
impl<M, Clos, State> WorkProvider<M, Clos, State>
where
M: Send + Sync + 'static,
Clos: FnMut(&mut State) -> PollNext,
State: Default,
{
// Constructs a work providers from the channels handles.
fn from_rx_worker(rx: RxWorker<M>, prio: Clos) -> Self {
let prioritised = select_with_strategy(rx.0, rx.1, prio);
WorkProviderImpl(prioritised)
}
}
/// Just a wrapper for implementing `overseer::SubsystemSender<ApprovalVotingMessage>` and
/// `overseer::SubsystemSender<ApprovalDistributionMessage>`.
///
/// The instance of this struct can be injected into the workers, so they can talk
/// directly with each other without intermediating in this subsystem loop.
pub struct ToWorker<T: Send + Sync + 'static>(
	// Bounded channel, used for normal-priority messages.
	MeteredSender<FromOrchestra<T>>,
	// Unbounded channel, used for signals and high-priority messages
	// (see `send_signal` and `send_message_with_priority`).
	UnboundedMeteredSender<FromOrchestra<T>>,
);

// Manual `Clone`: a derived impl would add a `T: Clone` bound, which the
// senders themselves do not require.
impl<T: Send + Sync + 'static> Clone for ToWorker<T> {
	fn clone(&self) -> Self {
		Self(self.0.clone(), self.1.clone())
	}
}
impl<T: Send + Sync + 'static> ToWorker<T> {
	/// Deliver an overseer signal to the worker via the unbounded channel, so it
	/// can never be blocked behind a full bounded queue.
	async fn send_signal(&mut self, signal: OverseerSignal) -> Result<(), SubsystemError> {
		match self.1.unbounded_send(FromOrchestra::Signal(signal)) {
			Ok(()) => Ok(()),
			Err(err) => Err(SubsystemError::QueueError(err.into_send_error())),
		}
	}

	/// The meters of both channels (bounded and unbounded) towards this worker.
	fn meter(&self) -> Meters {
		Meters::new(self.0.meter(), self.1.meter())
	}
}
// The signatures below are the manual expansion of the `async_trait` form of
// `overseer::SubsystemSender`, hence the explicit `'life0`/`'async_trait` lifetimes
// and boxed futures.
impl<T: Send + Sync + 'static + Debug> overseer::SubsystemSender<T> for ToWorker<T> {
	// Sends on the bounded channel; a send failure is logged, not propagated,
	// since it only happens on shutdown.
	fn send_message<'life0, 'async_trait>(
		&'life0 mut self,
		msg: T,
	) -> ::core::pin::Pin<
		Box<dyn ::core::future::Future<Output = ()> + ::core::marker::Send + 'async_trait>,
	>
	where
		'life0: 'async_trait,
		Self: 'async_trait,
	{
		async {
			if let Err(err) =
				self.0.send(pezkuwi_overseer::FromOrchestra::Communication { msg }).await
			{
				gum::error!(
					target: LOG_TARGET,
					"Failed to send message to approval voting worker: {:?}, subsystem is probably shutting down.",
					err
				);
			}
		}
		.boxed()
	}

	// Non-blocking send on the bounded channel. On failure the message is
	// unwrapped from its `FromOrchestra` envelope so the caller gets back the
	// original `T` inside the `TrySendError`.
	fn try_send_message(&mut self, msg: T) -> Result<(), metered::TrySendError<T>> {
		self.0
			.try_send(pezkuwi_overseer::FromOrchestra::Communication { msg })
			.map_err(|result| {
				let is_full = result.is_full();
				let msg = match result.into_inner() {
					// Only `Communication` is ever constructed above.
					pezkuwi_overseer::FromOrchestra::Signal(_) => {
						panic!("Cannot happen variant is never built")
					},
					pezkuwi_overseer::FromOrchestra::Communication { msg } => msg,
				};
				if is_full {
					metered::TrySendError::Full(msg)
				} else {
					metered::TrySendError::Closed(msg)
				}
			})
	}

	// Sends a batch sequentially through `send_message`.
	fn send_messages<'life0, 'async_trait, I>(
		&'life0 mut self,
		msgs: I,
	) -> ::core::pin::Pin<
		Box<dyn ::core::future::Future<Output = ()> + ::core::marker::Send + 'async_trait>,
	>
	where
		I: IntoIterator<Item = T> + Send,
		I::IntoIter: Send,
		I: 'async_trait,
		'life0: 'async_trait,
		Self: 'async_trait,
	{
		async {
			for msg in msgs {
				self.send_message(msg).await;
			}
		}
		.boxed()
	}

	// Sends on the unbounded channel; errors are logged, not propagated.
	fn send_unbounded_message(&mut self, msg: T) {
		if let Err(err) =
			self.1.unbounded_send(pezkuwi_overseer::FromOrchestra::Communication { msg })
		{
			gum::error!(
				target: LOG_TARGET,
				"Failed to send unbounded message to approval voting worker: {:?}, subsystem is probably shutting down.",
				err
			);
		}
	}

	// Normal priority goes over the bounded channel, high priority over the
	// unbounded one (so it cannot be blocked behind a full queue).
	fn send_message_with_priority<'life0, 'async_trait, P>(
		&'life0 mut self,
		msg: T,
	) -> ::core::pin::Pin<
		Box<dyn ::core::future::Future<Output = ()> + ::core::marker::Send + 'async_trait>,
	>
	where
		P: 'async_trait + Priority,
		'life0: 'async_trait,
		Self: 'async_trait,
	{
		match P::priority() {
			pezkuwi_overseer::PriorityLevel::Normal => self.send_message(msg),
			pezkuwi_overseer::PriorityLevel::High =>
				async { self.send_unbounded_message(msg) }.boxed(),
		}
	}

	// Same priority split as above, non-blocking; the unbounded path cannot fail
	// with `Full`, hence the unconditional `Ok`.
	fn try_send_message_with_priority<P: Priority>(
		&mut self,
		msg: T,
	) -> Result<(), metered::TrySendError<T>> {
		match P::priority() {
			pezkuwi_overseer::PriorityLevel::Normal => self.try_send_message(msg),
			pezkuwi_overseer::PriorityLevel::High => Ok(self.send_unbounded_message(msg)),
		}
	}
}
/// Handles that are used by a worker to receive work.
pub struct RxWorker<T: Send + Sync + 'static>(
	// Receiving end of the bounded channel.
	MeteredReceiver<FromOrchestra<T>>,
	// Receiving end of the unbounded channel.
	UnboundedMeteredReceiver<FromOrchestra<T>>,
);
// Build all the necessary channels for sending messages to a worker and for the
// worker to receive them, registering the sending side's meters with the watcher.
fn build_channels<T: Send + Sync + 'static>(
	channel_name: String,
	channel_size: usize,
	metrics_watcher: &mut MetricsWatcher,
) -> (ToWorker<T>, RxWorker<T>) {
	let (bounded_tx, bounded_rx) = channel::<FromOrchestra<T>>(channel_size);
	let (unbounded_tx, unbounded_rx) = unbounded::<FromOrchestra<T>>();

	let worker_handle = ToWorker(bounded_tx, unbounded_tx);
	metrics_watcher.watch(channel_name, worker_handle.meter());

	(worker_handle, RxWorker(bounded_rx, unbounded_rx))
}
/// Build the worker handles used for interacting with the workers.
///
/// `ToWorker` is used for sending messages to the workers.
/// `WorkProvider` is used by the workers for receiving the messages.
fn build_worker_handles<M, Clos, State>(
	channel_name: String,
	channel_size: usize,
	metrics_watcher: &mut MetricsWatcher,
	prio_right: Clos,
) -> (ToWorker<M>, WorkProvider<M, Clos, State>)
where
	M: Send + Sync + 'static,
	Clos: FnMut(&mut State) -> PollNext,
	State: Default,
{
	let (to_worker, rx_worker) = build_channels(channel_name, channel_size, metrics_watcher);
	let work_provider = WorkProviderImpl::from_rx_worker(rx_worker, prio_right);
	(to_worker, work_provider)
}
/// Just a wrapper for implementing `overseer::SubsystemSender<ApprovalDistributionMessage>`, so
/// that we can inject it into the approval voting subsystem.
///
/// Every `ApprovalDistributionMessage` sent through it is converted (`.into()`) to an
/// `ApprovalVotingParallelMessage` and forwarded over the wrapped sender.
#[derive(Clone)]
pub struct ApprovalVotingToApprovalDistribution<S: SubsystemSender<ApprovalVotingParallelMessage>>(
	S,
);
// Pure forwarding impl: each method converts the `ApprovalDistributionMessage` into an
// `ApprovalVotingParallelMessage` (`msg.into()`) and delegates to the inner sender.
// The `try_*` methods additionally convert any returned error's payload back with
// `try_into`, which is safe because the value was built from the same variant.
impl<S: SubsystemSender<ApprovalVotingParallelMessage>>
	overseer::SubsystemSender<ApprovalDistributionMessage>
	for ApprovalVotingToApprovalDistribution<S>
{
	#[allow(clippy::type_complexity, clippy::type_repetition_in_bounds)]
	fn send_message<'life0, 'async_trait>(
		&'life0 mut self,
		msg: ApprovalDistributionMessage,
	) -> ::core::pin::Pin<
		Box<dyn ::core::future::Future<Output = ()> + ::core::marker::Send + 'async_trait>,
	>
	where
		'life0: 'async_trait,
		Self: 'async_trait,
	{
		self.0.send_message(msg.into())
	}

	fn try_send_message(
		&mut self,
		msg: ApprovalDistributionMessage,
	) -> Result<(), metered::TrySendError<ApprovalDistributionMessage>> {
		self.0.try_send_message(msg.into()).map_err(|err| match err {
			// Safe to unwrap because it was built from the same type.
			metered::TrySendError::Closed(msg) =>
				metered::TrySendError::Closed(msg.try_into().unwrap()),
			metered::TrySendError::Full(msg) =>
				metered::TrySendError::Full(msg.try_into().unwrap()),
		})
	}

	#[allow(clippy::type_complexity, clippy::type_repetition_in_bounds)]
	fn send_messages<'life0, 'async_trait, I>(
		&'life0 mut self,
		msgs: I,
	) -> ::core::pin::Pin<
		Box<dyn ::core::future::Future<Output = ()> + ::core::marker::Send + 'async_trait>,
	>
	where
		I: IntoIterator<Item = ApprovalDistributionMessage> + Send,
		I::IntoIter: Send,
		I: 'async_trait,
		'life0: 'async_trait,
		Self: 'async_trait,
	{
		self.0.send_messages(msgs.into_iter().map(|msg| msg.into()))
	}

	fn send_unbounded_message(&mut self, msg: ApprovalDistributionMessage) {
		self.0.send_unbounded_message(msg.into())
	}

	fn send_message_with_priority<'life0, 'async_trait, P>(
		&'life0 mut self,
		msg: ApprovalDistributionMessage,
	) -> ::core::pin::Pin<
		Box<dyn ::core::future::Future<Output = ()> + ::core::marker::Send + 'async_trait>,
	>
	where
		P: 'async_trait + Priority,
		'life0: 'async_trait,
		Self: 'async_trait,
	{
		self.0.send_message_with_priority::<P>(msg.into())
	}

	fn try_send_message_with_priority<P: Priority>(
		&mut self,
		msg: ApprovalDistributionMessage,
	) -> Result<(), metered::TrySendError<ApprovalDistributionMessage>> {
		self.0.try_send_message_with_priority::<P>(msg.into()).map_err(|err| match err {
			// Safe to unwrap because it was built from the same type.
			metered::TrySendError::Closed(msg) =>
				metered::TrySendError::Closed(msg.try_into().unwrap()),
			metered::TrySendError::Full(msg) =>
				metered::TrySendError::Full(msg.try_into().unwrap()),
		})
	}
}
@@ -0,0 +1,234 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! The Metrics for Approval Voting Parallel Subsystem.
use std::collections::HashMap;
use pezkuwi_node_metrics::{metered::Meter, metrics};
use pezkuwi_overseer::prometheus;
/// Metrics for the approval voting parallel subsystem.
///
/// Wraps an optional `MetricsInner`: `Default` yields `None` (no registry
/// available), in which case metric operations are no-ops.
#[derive(Default, Clone)]
pub struct Metrics(Option<MetricsInner>);
/// Approval Voting parallel metrics.
#[derive(Clone)]
pub struct MetricsInner {
	// The inner metrics of the approval distribution workers.
	approval_distribution: pezkuwi_approval_distribution::metrics::Metrics,
	// The inner metrics of the approval voting workers.
	approval_voting: pezkuwi_node_core_approval_voting::Metrics,
	// Time-of-flight histogram for the workers' bounded channels, labelled per worker.
	to_worker_bounded_tof: prometheus::HistogramVec,
	// Number of elements sent to the worker's bounded queue.
	to_worker_bounded_sent: prometheus::GaugeVec<prometheus::U64>,
	// Number of elements received by the worker's bounded queue.
	to_worker_bounded_received: prometheus::GaugeVec<prometheus::U64>,
	// Number of times senders blocked while sending messages to the worker.
	to_worker_bounded_blocked: prometheus::GaugeVec<prometheus::U64>,
	// Time-of-flight histogram for the workers' unbounded channels, labelled per worker.
	to_worker_unbounded_tof: prometheus::HistogramVec,
	// Number of elements sent to the worker's unbounded queue.
	to_worker_unbounded_sent: prometheus::GaugeVec<prometheus::U64>,
	// Number of elements received by the worker's unbounded queue.
	to_worker_unbounded_received: prometheus::GaugeVec<prometheus::U64>,
}
impl Metrics {
	/// Get the approval distribution metrics (default/no-op metrics when no
	/// registry was configured).
	pub fn approval_distribution_metrics(&self) -> pezkuwi_approval_distribution::metrics::Metrics {
		match self.0.as_ref() {
			Some(inner) => inner.approval_distribution.clone(),
			None => Default::default(),
		}
	}

	/// Get the approval voting metrics (default/no-op metrics when no registry
	/// was configured).
	pub fn approval_voting_metrics(&self) -> pezkuwi_node_core_approval_voting::Metrics {
		match self.0.as_ref() {
			Some(inner) => inner.approval_voting.clone(),
			None => Default::default(),
		}
	}
}
impl metrics::Metrics for Metrics {
	/// Try to register the metrics.
	///
	/// Registers the nested approval-distribution and approval-voting metrics first,
	/// then the per-worker channel metrics, all labelled by `worker_name`.
	fn try_register(
		registry: &prometheus::Registry,
	) -> std::result::Result<Self, prometheus::PrometheusError> {
		Ok(Metrics(Some(MetricsInner {
			approval_distribution: pezkuwi_approval_distribution::metrics::Metrics::try_register(
				registry,
			)?,
			approval_voting: pezkuwi_node_core_approval_voting::Metrics::try_register(registry)?,
			// Bounded-channel time-of-flight histogram; buckets grow roughly 4x per step.
			to_worker_bounded_tof: prometheus::register(
				prometheus::HistogramVec::new(
					prometheus::HistogramOpts::new(
						"pezkuwi_approval_voting_parallel_worker_bounded_tof",
						"Duration spent in a particular approval voting worker channel from entrance to removal",
					)
					.buckets(vec![
						0.0001, 0.0004, 0.0016, 0.0064, 0.0256, 0.1024, 0.4096, 1.6384, 3.2768,
						4.9152, 6.5536,
					]),
					&["worker_name"],
				)?,
				registry,
			)?,
			to_worker_bounded_sent: prometheus::register(
				prometheus::GaugeVec::<prometheus::U64>::new(
					prometheus::Opts::new(
						"pezkuwi_approval_voting_parallel_worker_bounded_sent",
						"Number of elements sent to approval voting workers' bounded queues",
					),
					&["worker_name"],
				)?,
				registry,
			)?,
			to_worker_bounded_received: prometheus::register(
				prometheus::GaugeVec::<prometheus::U64>::new(
					prometheus::Opts::new(
						"pezkuwi_approval_voting_parallel_worker_bounded_received",
						"Number of elements received by approval voting workers' bounded queues",
					),
					&["worker_name"],
				)?,
				registry,
			)?,
			to_worker_bounded_blocked: prometheus::register(
				prometheus::GaugeVec::<prometheus::U64>::new(
					prometheus::Opts::new(
						"pezkuwi_approval_voting_parallel_worker_bounded_blocked",
						"Number of times approval voting workers blocked while sending messages to a subsystem",
					),
					&["worker_name"],
				)?,
				registry,
			)?,
			// Unbounded-channel time-of-flight histogram; same buckets as the bounded one.
			to_worker_unbounded_tof: prometheus::register(
				prometheus::HistogramVec::new(
					prometheus::HistogramOpts::new(
						"pezkuwi_approval_voting_parallel_worker_unbounded_tof",
						"Duration spent in a particular approval voting worker channel from entrance to removal",
					)
					.buckets(vec![
						0.0001, 0.0004, 0.0016, 0.0064, 0.0256, 0.1024, 0.4096, 1.6384, 3.2768,
						4.9152, 6.5536,
					]),
					&["worker_name"],
				)?,
				registry,
			)?,
			to_worker_unbounded_sent: prometheus::register(
				prometheus::GaugeVec::<prometheus::U64>::new(
					prometheus::Opts::new(
						"pezkuwi_approval_voting_parallel_worker_unbounded_sent",
						"Number of elements sent to approval voting workers' unbounded queues",
					),
					&["worker_name"],
				)?,
				registry,
			)?,
			to_worker_unbounded_received: prometheus::register(
				prometheus::GaugeVec::<prometheus::U64>::new(
					prometheus::Opts::new(
						"pezkuwi_approval_voting_parallel_worker_unbounded_received",
						"Number of elements received by approval voting workers' unbounded queues",
					),
					&["worker_name"],
				)?,
				registry,
			)?,
		})))
	}
}
/// The meters to watch.
///
/// Holds the channel meters of a single worker — one for its bounded and one
/// for its unbounded queue — so their readouts can be published as prometheus
/// metrics by [`MetricsWatcher::collect_metrics`].
#[derive(Clone)]
pub struct Meters {
	// Meter of the worker's bounded channel.
	bounded: Meter,
	// Meter of the worker's unbounded channel.
	unbounded: Meter,
}
impl Meters {
	/// Builds a `Meters` holding clones of the given bounded and unbounded
	/// channel meters.
	pub fn new(bounded: &Meter, unbounded: &Meter) -> Self {
		let bounded = bounded.clone();
		let unbounded = unbounded.clone();
		Self { bounded, unbounded }
	}
}
/// A metrics watcher that watches the meters and updates the metrics.
pub struct MetricsWatcher {
	// Watched workers, keyed by worker name; the name is also used as the
	// prometheus `worker_name` label value.
	to_watch: HashMap<String, Meters>,
	// The registered prometheus metrics to publish the readouts into.
	metrics: Metrics,
}
impl MetricsWatcher {
	/// Create a new metrics watcher.
	pub fn new(metrics: Metrics) -> Self {
		Self { to_watch: HashMap::new(), metrics }
	}
	/// Watch the meters of a worker with this name.
	pub fn watch(&mut self, worker_name: String, meters: Meters) {
		self.to_watch.insert(worker_name, meters);
	}
	/// Collect all the metrics.
	///
	/// Reads the current readouts of every watched worker's bounded and
	/// unbounded channel meters and publishes them to the prometheus gauges
	/// and histograms, labelled with the worker's name.
	pub fn collect_metrics(&self) {
		// If no prometheus registry was configured there is nowhere to report
		// to — return early instead of reading every worker's meters and
		// re-checking the `Option` on each loop iteration.
		let metrics = match self.metrics.0.as_ref() {
			Some(metrics) => metrics,
			None => return,
		};
		for (name, meter) in &self.to_watch {
			let bounded_readouts = meter.bounded.read();
			let unbounded_readouts = meter.unbounded.read();
			metrics
				.to_worker_bounded_sent
				.with_label_values(&[name])
				.set(bounded_readouts.sent as u64);
			metrics
				.to_worker_bounded_received
				.with_label_values(&[name])
				.set(bounded_readouts.received as u64);
			metrics
				.to_worker_bounded_blocked
				.with_label_values(&[name])
				.set(bounded_readouts.blocked as u64);
			metrics
				.to_worker_unbounded_sent
				.with_label_values(&[name])
				.set(unbounded_readouts.sent as u64);
			metrics
				.to_worker_unbounded_received
				.with_label_values(&[name])
				.set(unbounded_readouts.received as u64);
			// Time-of-flight samples are folded into the per-worker histograms.
			let hist_bounded = metrics.to_worker_bounded_tof.with_label_values(&[name]);
			for tof in bounded_readouts.tof {
				hist_bounded.observe(tof.as_f64());
			}
			let hist_unbounded = metrics.to_worker_unbounded_tof.with_label_values(&[name]);
			for tof in unbounded_readouts.tof {
				hist_unbounded.observe(tof.as_f64());
			}
		}
	}
}
@@ -0,0 +1,982 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! The tests for Approval Voting Parallel Subsystem.
use std::{
collections::{HashMap, HashSet},
future::Future,
sync::Arc,
time::Duration,
};
use crate::{
build_worker_handles, metrics::MetricsWatcher, prio_right, run_main_loop, start_workers,
validator_index_for_msg, ApprovalVotingParallelSubsystem, Metrics, WorkProvider,
};
use assert_matches::assert_matches;
use futures::{channel::oneshot, future, stream::PollNext, StreamExt};
use itertools::Itertools;
use pezkuwi_node_core_approval_voting::{ApprovalVotingWorkProvider, Config};
use pezkuwi_node_network_protocol::{peer_set::ValidationVersion, ObservedRole, PeerId, View};
use pezkuwi_node_primitives::approval::{
time::SystemClock,
v1::RELAY_VRF_MODULO_CONTEXT,
v2::{
AssignmentCertKindV2, AssignmentCertV2, CoreBitfield, IndirectAssignmentCertV2,
IndirectSignedApprovalVoteV2,
},
};
use pezkuwi_node_subsystem::{
messages::{ApprovalDistributionMessage, ApprovalVotingMessage, ApprovalVotingParallelMessage},
FromOrchestra,
};
use pezkuwi_node_subsystem_test_helpers::{mock::new_leaf, TestSubsystemContext};
use pezkuwi_overseer::{ActiveLeavesUpdate, OverseerSignal, SpawnGlue, TimeoutExt};
use pezkuwi_primitives::{CandidateHash, CoreIndex, Hash, ValidatorIndex};
use sc_keystore::{Keystore, LocalKeystore};
use sp_consensus::SyncOracle;
use sp_consensus_babe::{VrfPreOutput, VrfProof, VrfSignature};
use sp_core::{testing::TaskExecutor, H256};
use sp_keyring::Sr25519Keyring;
// Handle the tests use to drive the subsystem-under-test the way the real
// overseer would.
type VirtualOverseer =
	pezkuwi_node_subsystem_test_helpers::TestSubsystemContextHandle<ApprovalVotingParallelMessage>;
// Slot duration (6 seconds) used when configuring the subsystem in tests.
const SLOT_DURATION_MILLIS: u64 = 6000;
// Column layout of the in-memory approval-voting database used by the tests.
pub mod test_constants {
	pub(crate) const DATA_COL: u32 = 0;
	pub(crate) const NUM_COLUMNS: u32 = 1;
}
/// Builds a fake v2 assignment certificate signed with a freshly generated
/// throw-away keypair — only good for exercising message plumbing in tests,
/// never for real assignment checking.
fn fake_assignment_cert_v2(
	block_hash: Hash,
	validator: ValidatorIndex,
	core_bitfield: CoreBitfield,
) -> IndirectAssignmentCertV2 {
	let signing_ctx = schnorrkel::signing_context(RELAY_VRF_MODULO_CONTEXT);
	let payload = b"WhenTeyrchains?";
	let mut rng = rand_core::OsRng;
	let throwaway_keypair = schnorrkel::Keypair::generate_with(&mut rng);
	let (vrf_inout, vrf_proof, _) = throwaway_keypair.vrf_sign(signing_ctx.bytes(payload));
	let pre_output = vrf_inout.to_preout();
	IndirectAssignmentCertV2 {
		block_hash,
		validator,
		cert: AssignmentCertV2 {
			kind: AssignmentCertKindV2::RelayVRFModuloCompact { core_bitfield },
			vrf: VrfSignature { pre_output: VrfPreOutput(pre_output), proof: VrfProof(vrf_proof) },
		},
	}
}
/// Creates a meaningless signature (a fixed byte pattern) — only useful where
/// a test needs *some* `ValidatorSignature` and its validity is never checked.
pub fn dummy_signature() -> pezkuwi_primitives::ValidatorSignature {
	let raw_bytes = [1u8; 64];
	sp_core::crypto::UncheckedFrom::unchecked_from(raw_bytes)
}
/// Builds the subsystem-under-test together with its test context and a
/// virtual overseer handle.
///
/// Uses an in-memory keystore (seeded with Alice's teyrchain key) and an
/// in-memory kvdb, so the tests touch no on-disk state.
fn build_subsystem(
	sync_oracle: Box<dyn SyncOracle + Send>,
) -> (
	ApprovalVotingParallelSubsystem,
	TestSubsystemContext<ApprovalVotingParallelMessage, SpawnGlue<TaskExecutor>>,
	VirtualOverseer,
) {
	sp_tracing::init_for_tests();
	let pool = sp_core::testing::TaskExecutor::new();
	let (context, virtual_overseer) = pezkuwi_node_subsystem_test_helpers::make_subsystem_context::<
		ApprovalVotingParallelMessage,
		_,
	>(pool.clone());
	let keystore = LocalKeystore::in_memory();
	// Result deliberately ignored: key generation into an in-memory keystore
	// is best-effort setup for the tests.
	let _ = keystore.sr25519_generate_new(
		pezkuwi_primitives::TEYRCHAIN_KEY_TYPE_ID,
		Some(&Sr25519Keyring::Alice.to_seed()),
	);
	let clock = Arc::new(SystemClock {});
	let db = kvdb_memorydb::create(test_constants::NUM_COLUMNS);
	let db = pezkuwi_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[]);
	(
		ApprovalVotingParallelSubsystem::with_config_and_clock(
			Config {
				col_approval_data: test_constants::DATA_COL,
				slot_duration_millis: SLOT_DURATION_MILLIS,
			},
			Arc::new(db),
			Arc::new(keystore),
			sync_oracle,
			Metrics::default(),
			clock.clone(),
			SpawnGlue(pool),
			None,
		),
		context,
		virtual_overseer,
	)
}
// Sync oracle used by the tests: always reports the node as fully synced so
// the subsystem processes messages immediately.
#[derive(Clone)]
struct TestSyncOracle {}
impl SyncOracle for TestSyncOracle {
	fn is_major_syncing(&self) -> bool {
		false
	}
	fn is_offline(&self) -> bool {
		// NOTE(review): the message mentions "network bridge" — presumably
		// copied from another test fixture; this method is never called here.
		unimplemented!("not used in network bridge")
	}
}
/// Drives `run_main_loop` against a test-provided overseer and hand-built
/// worker channels.
///
/// No real workers are spawned: the work-provider ends of the channels are
/// handed to `test_fn`, so the test can assert exactly what each worker would
/// have received. With `subsystem_gracefully_exits == false`, errors returned
/// by the main loop are swallowed — used by the tests that deliberately break
/// a channel.
fn test_harness<T, Clos, State>(
	num_approval_distro_workers: usize,
	prio_right: Clos,
	subsystem_gracefully_exits: bool,
	test_fn: impl FnOnce(
		VirtualOverseer,
		WorkProvider<ApprovalVotingMessage, Clos, State>,
		Vec<WorkProvider<ApprovalDistributionMessage, Clos, State>>,
	) -> T,
) where
	T: Future<Output = VirtualOverseer>,
	Clos: Clone + FnMut(&mut State) -> PollNext,
	State: Default,
{
	let (subsystem, context, virtual_overseer) = build_subsystem(Box::new(TestSyncOracle {}));
	let mut metrics_watcher = MetricsWatcher::new(subsystem.metrics.clone());
	let channel_size = 5;
	let (to_approval_voting_worker, approval_voting_work_provider) =
		build_worker_handles::<ApprovalVotingMessage, _, _>(
			"to_approval_voting_worker".into(),
			channel_size,
			&mut metrics_watcher,
			prio_right.clone(),
		);
	// One channel pair per approval-distribution worker. A `Range` is already
	// an iterator, so no `into_iter()` conversion is needed.
	let approval_distribution_channels = (0..num_approval_distro_workers)
		.map(|worker_index| {
			build_worker_handles::<ApprovalDistributionMessage, _, _>(
				format!("to_approval_distro/{}", worker_index),
				channel_size,
				&mut metrics_watcher,
				prio_right.clone(),
			)
		})
		.collect_vec();
	let to_approval_distribution_workers =
		approval_distribution_channels.iter().map(|(tx, _)| tx.clone()).collect_vec();
	let approval_distribution_work_providers =
		approval_distribution_channels.into_iter().map(|(_, rx)| rx).collect_vec();
	let subsystem = async move {
		let result = run_main_loop(
			context,
			to_approval_voting_worker,
			to_approval_distribution_workers,
			metrics_watcher,
		)
		.await;
		// Only surface errors when the test expects a graceful exit;
		// failure-injection tests break the loop on purpose.
		if subsystem_gracefully_exits && result.is_err() {
			result
		} else {
			Ok(())
		}
	};
	let test_fut = test_fn(
		virtual_overseer,
		approval_voting_work_provider,
		approval_distribution_work_providers,
	);
	futures::pin_mut!(test_fut);
	futures::pin_mut!(subsystem);
	futures::executor::block_on(future::join(
		async move {
			let _overseer = test_fut.await;
		},
		subsystem,
	))
	.1
	.unwrap();
}
/// Upper bound on how long delivering a single signal/message may take in tests.
const TIMEOUT: Duration = Duration::from_millis(2000);
/// Sends an overseer `signal` to the subsystem, panicking if it is not
/// accepted within [`TIMEOUT`].
async fn overseer_signal(overseer: &mut VirtualOverseer, signal: OverseerSignal) {
	overseer
		.send(FromOrchestra::Signal(signal))
		.timeout(TIMEOUT)
		.await
		// `unwrap_or_else` instead of `expect(&format!(..))`: the panic
		// message is only built on failure (clippy::expect_fun_call).
		.unwrap_or_else(|| panic!("{:?} is more than enough for sending signals.", TIMEOUT));
}
/// Sends a communication `msg` to the subsystem, panicking if it is not
/// accepted within [`TIMEOUT`].
async fn overseer_message(overseer: &mut VirtualOverseer, msg: ApprovalVotingParallelMessage) {
	overseer
		.send(FromOrchestra::Communication { msg })
		.timeout(TIMEOUT)
		.await
		// Lazily-built panic message (clippy::expect_fun_call); also corrected
		// to say "messages" — the old text wrongly said "signals".
		.unwrap_or_else(|| panic!("{:?} is more than enough for sending messages.", TIMEOUT));
}
// Builds the subsystem and runs `start_workers`, unwrapping so that any error
// while spawning the worker tasks fails the test.
async fn run_start_workers() {
	let (subsystem, mut context, _) = build_subsystem(Box::new(TestSyncOracle {}));
	let mut metrics_watcher = MetricsWatcher::new(subsystem.metrics.clone());
	let _workers = start_workers(&mut context, subsystem, &mut metrics_watcher).await.unwrap();
}
// Test starting the workers succeeds.
// (Smoke test: block on the async setup helper; `unwrap` inside it panics on failure.)
#[test]
fn start_workers_succeeds() {
	futures::executor::block_on(run_start_workers());
}
// Test main loop forwards messages to the correct worker for all type of messages.
//
// Covers each handled `ApprovalVotingParallelMessage` variant and asserts which
// worker(s) it reaches: approval-voting-only messages, broadcast messages, and
// per-validator messages that must land only on the worker owning
// `validator_index % num_approval_distro_workers`.
#[test]
fn test_main_loop_forwards_correctly() {
	let num_approval_distro_workers = 4;
	test_harness(
		num_approval_distro_workers,
		prio_right,
		true,
		|mut overseer, mut approval_voting_work_provider, mut rx_approval_distribution_workers| async move {
			// 1. Check Signals are correctly forwarded to the workers.
			let signal = OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf(
				Hash::random(),
				1,
			)));
			overseer_signal(&mut overseer, signal.clone()).await;
			let approval_voting_receives = approval_voting_work_provider.recv().await.unwrap();
			assert_matches!(approval_voting_receives, FromOrchestra::Signal(_));
			for rx_approval_distribution_worker in rx_approval_distribution_workers.iter_mut() {
				let approval_distribution_receives =
					rx_approval_distribution_worker.next().await.unwrap();
				assert_matches!(approval_distribution_receives, FromOrchestra::Signal(_));
			}
			// ApprovedAncestor is approval-voting business only.
			let (test_tx, _rx) = oneshot::channel();
			let test_hash = Hash::random();
			let test_block_nr = 2;
			overseer_message(
				&mut overseer,
				ApprovalVotingParallelMessage::ApprovedAncestor(test_hash, test_block_nr, test_tx),
			)
			.await;
			assert_matches!(
				approval_voting_work_provider.recv().await.unwrap(),
				FromOrchestra::Communication {
					msg: ApprovalVotingMessage::ApprovedAncestor(hash, block_nr, _)
				} => {
					assert_eq!(hash, test_hash);
					assert_eq!(block_nr, test_block_nr);
				}
			);
			// 200ms of silence is taken as proof that a worker received nothing.
			for rx_approval_distribution_worker in rx_approval_distribution_workers.iter_mut() {
				assert!(rx_approval_distribution_worker
					.next()
					.timeout(Duration::from_millis(200))
					.await
					.is_none());
			}
			// 2. Check GetApprovalSignaturesForCandidate is correctly forwarded to the workers.
			let (test_tx, _rx) = oneshot::channel();
			let test_hash = CandidateHash(Hash::random());
			overseer_message(
				&mut overseer,
				ApprovalVotingParallelMessage::GetApprovalSignaturesForCandidate(
					test_hash, test_tx,
				),
			)
			.await;
			assert_matches!(
				approval_voting_work_provider.recv().await.unwrap(),
				FromOrchestra::Communication {
					msg: ApprovalVotingMessage::GetApprovalSignaturesForCandidate(hash, _)
				} => {
					assert_eq!(hash, test_hash);
				}
			);
			for rx_approval_distribution_worker in rx_approval_distribution_workers.iter_mut() {
				assert!(rx_approval_distribution_worker
					.next()
					.timeout(Duration::from_millis(200))
					.await
					.is_none());
			}
			// 3. Check NewBlocks is correctly forwarded to the workers.
			overseer_message(&mut overseer, ApprovalVotingParallelMessage::NewBlocks(vec![])).await;
			for rx_approval_distribution_worker in rx_approval_distribution_workers.iter_mut() {
				assert_matches!(rx_approval_distribution_worker.next().await.unwrap(),
					FromOrchestra::Communication {
						msg: ApprovalDistributionMessage::NewBlocks(blocks)
					} => {
						assert!(blocks.is_empty());
					}
				);
			}
			assert!(approval_voting_work_provider
				.recv()
				.timeout(Duration::from_millis(200))
				.await
				.is_none());
			// 4. Check DistributeAssignment is correctly forwarded to the workers.
			let validator_index = ValidatorIndex(17);
			let assignment =
				fake_assignment_cert_v2(Hash::random(), validator_index, CoreIndex(1).into());
			overseer_message(
				&mut overseer,
				ApprovalVotingParallelMessage::DistributeAssignment(assignment.clone(), 1.into()),
			)
			.await;
			// Only the worker assigned to this validator index should see it.
			for (index, rx_approval_distribution_worker) in
				rx_approval_distribution_workers.iter_mut().enumerate()
			{
				if index == validator_index.0 as usize % num_approval_distro_workers {
					assert_matches!(rx_approval_distribution_worker.next().await.unwrap(),
						FromOrchestra::Communication {
							msg: ApprovalDistributionMessage::DistributeAssignment(cert, bitfield)
						} => {
							assert_eq!(cert, assignment);
							assert_eq!(bitfield, 1.into());
						}
					);
				} else {
					assert!(rx_approval_distribution_worker
						.next()
						.timeout(Duration::from_millis(200))
						.await
						.is_none());
				}
			}
			assert!(approval_voting_work_provider
				.recv()
				.timeout(Duration::from_millis(200))
				.await
				.is_none());
			// 5. Check DistributeApproval is correctly forwarded to the workers.
			let validator_index = ValidatorIndex(26);
			let expected_vote = IndirectSignedApprovalVoteV2 {
				block_hash: H256::random(),
				candidate_indices: 1.into(),
				validator: validator_index,
				signature: dummy_signature(),
			};
			overseer_message(
				&mut overseer,
				ApprovalVotingParallelMessage::DistributeApproval(expected_vote.clone()),
			)
			.await;
			for (index, rx_approval_distribution_worker) in
				rx_approval_distribution_workers.iter_mut().enumerate()
			{
				if index == validator_index.0 as usize % num_approval_distro_workers {
					assert_matches!(rx_approval_distribution_worker.next().await.unwrap(),
						FromOrchestra::Communication {
							msg: ApprovalDistributionMessage::DistributeApproval(vote)
						} => {
							assert_eq!(vote, expected_vote);
						}
					);
				} else {
					assert!(rx_approval_distribution_worker
						.next()
						.timeout(Duration::from_millis(200))
						.await
						.is_none());
				}
			}
			// 6. Check NetworkBridgeUpdate::PeerMessage is correctly forwarded just to one of the
			// workers.
			let approvals = vec![
				IndirectSignedApprovalVoteV2 {
					block_hash: H256::random(),
					candidate_indices: 1.into(),
					validator: validator_index,
					signature: dummy_signature(),
				},
				IndirectSignedApprovalVoteV2 {
					block_hash: H256::random(),
					candidate_indices: 2.into(),
					validator: validator_index,
					signature: dummy_signature(),
				},
			];
			let expected_msg = pezkuwi_node_network_protocol::ValidationProtocols::V3(
				pezkuwi_node_network_protocol::v3::ApprovalDistributionMessage::Approvals(
					approvals.clone(),
				),
			);
			overseer_message(
				&mut overseer,
				ApprovalVotingParallelMessage::NetworkBridgeUpdate(
					pezkuwi_node_subsystem::messages::NetworkBridgeEvent::PeerMessage(
						PeerId::random(),
						expected_msg.clone(),
					),
				),
			)
			.await;
			for (index, rx_approval_distribution_worker) in
				rx_approval_distribution_workers.iter_mut().enumerate()
			{
				if index == validator_index.0 as usize % num_approval_distro_workers {
					assert_matches!(rx_approval_distribution_worker.next().await.unwrap(),
						FromOrchestra::Communication {
							msg: ApprovalDistributionMessage::NetworkBridgeUpdate(
								pezkuwi_node_subsystem::messages::NetworkBridgeEvent::PeerMessage(
									_,
									msg,
								),
							)
						} => {
							assert_eq!(msg, expected_msg);
						}
					);
				} else {
					assert!(rx_approval_distribution_worker
						.next()
						.timeout(Duration::from_millis(200))
						.await
						.is_none());
				}
			}
			// NOTE(review): the approval-voting silence check below is done
			// twice in the original — kept as-is; it is harmless.
			assert!(approval_voting_work_provider
				.recv()
				.timeout(Duration::from_millis(200))
				.await
				.is_none());
			assert!(approval_voting_work_provider
				.recv()
				.timeout(Duration::from_millis(200))
				.await
				.is_none());
			// 7. Check NetworkBridgeUpdate::PeerConnected is correctly forwarded to all workers.
			let expected_peer_id = PeerId::random();
			overseer_message(
				&mut overseer,
				ApprovalVotingParallelMessage::NetworkBridgeUpdate(
					pezkuwi_node_subsystem::messages::NetworkBridgeEvent::PeerConnected(
						expected_peer_id,
						ObservedRole::Authority,
						ValidationVersion::V3.into(),
						None,
					),
				),
			)
			.await;
			for rx_approval_distribution_worker in rx_approval_distribution_workers.iter_mut() {
				assert_matches!(rx_approval_distribution_worker.next().await.unwrap(),
					FromOrchestra::Communication {
						msg: ApprovalDistributionMessage::NetworkBridgeUpdate(
							pezkuwi_node_subsystem::messages::NetworkBridgeEvent::PeerConnected(
								peer_id,
								role,
								version,
								authority_id,
							),
						)
					} => {
						assert_eq!(peer_id, expected_peer_id);
						assert_eq!(role, ObservedRole::Authority);
						assert_eq!(version, ValidationVersion::V3.into());
						assert_eq!(authority_id, None);
					}
				);
			}
			assert!(approval_voting_work_provider
				.recv()
				.timeout(Duration::from_millis(200))
				.await
				.is_none());
			// 8. Check ApprovalCheckingLagUpdate is correctly forwarded to all workers.
			overseer_message(
				&mut overseer,
				ApprovalVotingParallelMessage::ApprovalCheckingLagUpdate(7),
			)
			.await;
			for rx_approval_distribution_worker in rx_approval_distribution_workers.iter_mut() {
				assert_matches!(rx_approval_distribution_worker.next().await.unwrap(),
					FromOrchestra::Communication {
						msg: ApprovalDistributionMessage::ApprovalCheckingLagUpdate(
							lag
						)
					} => {
						assert_eq!(lag, 7);
					}
				);
			}
			assert!(approval_voting_work_provider
				.recv()
				.timeout(Duration::from_millis(200))
				.await
				.is_none());
			overseer_signal(&mut overseer, OverseerSignal::Conclude).await;
			overseer
		},
	);
}
/// Test GetApprovalSignatures correctly gathers the signatures from all workers.
#[test]
fn test_handle_get_approval_signatures() {
	let num_approval_distro_workers = 4;
	test_harness(
		num_approval_distro_workers,
		prio_right,
		true,
		|mut overseer, mut approval_voting_work_provider, mut rx_approval_distribution_workers| async move {
			let (tx, rx) = oneshot::channel();
			let first_block = Hash::random();
			let second_block = Hash::random();
			let expected_candidates: HashSet<_> =
				vec![(first_block, 2), (second_block, 3)].into_iter().collect();
			overseer_message(
				&mut overseer,
				ApprovalVotingParallelMessage::GetApprovalSignatures(
					expected_candidates.clone(),
					tx,
				),
			)
			.await;
			// The approval-voting worker is not involved in signature gathering.
			assert!(approval_voting_work_provider
				.recv()
				.timeout(Duration::from_millis(200))
				.await
				.is_none());
			// Each distribution worker answers with a disjoint set of votes
			// (validator indices congruent to the worker's index modulo the
			// worker count); the main loop must merge all answers into one
			// response on `rx`.
			let mut all_votes = HashMap::new();
			for (index, rx_approval_distribution_worker) in
				rx_approval_distribution_workers.iter_mut().enumerate()
			{
				assert_matches!(rx_approval_distribution_worker.next().await.unwrap(),
					FromOrchestra::Communication {
						msg: ApprovalDistributionMessage::GetApprovalSignatures(
							candidates, tx
						)
					} => {
						assert_eq!(candidates, expected_candidates);
						let to_send: HashMap<_, _> = {0..10}.into_iter().map(|validator| {
							let validator_index = ValidatorIndex(validator as u32 * num_approval_distro_workers as u32 + index as u32);
							(validator_index, (first_block, vec![2, 4], dummy_signature()))
						}).collect();
						tx.send(to_send.clone()).unwrap();
						all_votes.extend(to_send.clone());
					}
				);
			}
			let received_votes = rx.await.unwrap();
			assert_eq!(received_votes, all_votes);
			overseer_signal(&mut overseer, OverseerSignal::Conclude).await;
			overseer
		},
	)
}
/// Test subsystem exits with error when approval_voting_work_provider exits.
#[test]
fn test_subsystem_exits_with_error_if_approval_voting_worker_errors() {
	let num_approval_distro_workers = 4;
	test_harness(
		num_approval_distro_workers,
		prio_right,
		// The main loop is expected to error out, so the harness must not
		// require a graceful exit.
		false,
		|overseer, approval_voting_work_provider, _rx_approval_distribution_workers| async move {
			// Drop the approval_voting_work_provider to simulate an error.
			std::mem::drop(approval_voting_work_provider);
			overseer
		},
	)
}
/// Test subsystem exits with error when approval_distribution_workers exits.
#[test]
fn test_subsystem_exits_with_error_if_approval_distribution_worker_errors() {
	let num_approval_distro_workers = 4;
	test_harness(
		num_approval_distro_workers,
		prio_right,
		// The main loop is expected to error out, so the harness must not
		// require a graceful exit.
		false,
		|overseer, _approval_voting_work_provider, rx_approval_distribution_workers| async move {
			// Drop the approval_distribution_workers to simulate an error.
			// (Dropping the first worker's receiver is enough to break the loop.)
			std::mem::drop(rx_approval_distribution_workers.into_iter().next().unwrap());
			overseer
		},
	)
}
/// Test signals sent before messages are processed in order.
#[test]
fn test_signal_before_message_keeps_receive_order() {
	let num_approval_distro_workers = 4;
	test_harness(
		num_approval_distro_workers,
		prio_right,
		true,
		|mut overseer, mut approval_voting_work_provider, mut rx_approval_distribution_workers| async move {
			// Send a signal first, then a message ...
			let signal = OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf(
				Hash::random(),
				1,
			)));
			overseer_signal(&mut overseer, signal.clone()).await;
			let validator_index = ValidatorIndex(17);
			let assignment =
				fake_assignment_cert_v2(Hash::random(), validator_index, CoreIndex(1).into());
			overseer_message(
				&mut overseer,
				ApprovalVotingParallelMessage::DistributeAssignment(assignment.clone(), 1.into()),
			)
			.await;
			// ... and expect every worker to observe the signal before the message.
			let approval_voting_receives = approval_voting_work_provider.recv().await.unwrap();
			assert_matches!(approval_voting_receives, FromOrchestra::Signal(_));
			let rx_approval_distribution_worker = rx_approval_distribution_workers
				.get_mut(validator_index.0 as usize % num_approval_distro_workers)
				.unwrap();
			let approval_distribution_receives =
				rx_approval_distribution_worker.next().await.unwrap();
			assert_matches!(approval_distribution_receives, FromOrchestra::Signal(_));
			assert_matches!(
				rx_approval_distribution_worker.next().await.unwrap(),
				FromOrchestra::Communication {
					msg: ApprovalDistributionMessage::DistributeAssignment(_, _)
				}
			);
			overseer_signal(&mut overseer, OverseerSignal::Conclude).await;
			overseer
		},
	)
}
/// Test signals sent after messages are processed with the highest priority.
#[test]
fn test_signal_is_prioritized_when_unread_messages_in_the_queue() {
	let num_approval_distro_workers = 4;
	test_harness(
		num_approval_distro_workers,
		prio_right,
		true,
		|mut overseer, mut approval_voting_work_provider, mut rx_approval_distribution_workers| async move {
			// Send a message first ...
			let validator_index = ValidatorIndex(17);
			let assignment =
				fake_assignment_cert_v2(Hash::random(), validator_index, CoreIndex(1).into());
			overseer_message(
				&mut overseer,
				ApprovalVotingParallelMessage::DistributeAssignment(assignment.clone(), 1.into()),
			)
			.await;
			// ... then a signal, while the message is still unread.
			let signal = OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(new_leaf(
				Hash::random(),
				1,
			)));
			overseer_signal(&mut overseer, signal.clone()).await;
			// The workers must still observe the signal before the message.
			let approval_voting_receives = approval_voting_work_provider.recv().await.unwrap();
			assert_matches!(approval_voting_receives, FromOrchestra::Signal(_));
			let rx_approval_distribution_worker = rx_approval_distribution_workers
				.get_mut(validator_index.0 as usize % num_approval_distro_workers)
				.unwrap();
			let approval_distribution_receives =
				rx_approval_distribution_worker.next().await.unwrap();
			assert_matches!(approval_distribution_receives, FromOrchestra::Signal(_));
			assert_matches!(
				rx_approval_distribution_worker.next().await.unwrap(),
				FromOrchestra::Communication {
					msg: ApprovalDistributionMessage::DistributeAssignment(_, _)
				}
			);
			overseer_signal(&mut overseer, OverseerSignal::Conclude).await;
			overseer
		},
	)
}
/// Test peer view updates have higher priority than normal messages.
#[test]
fn test_peer_view_is_prioritized_when_unread_messages_in_the_queue() {
	let num_approval_distro_workers = 4;
	test_harness(
		num_approval_distro_workers,
		prio_right,
		true,
		|mut overseer, mut approval_voting_work_provider, mut rx_approval_distribution_workers| async move {
			// Enqueue a normal peer message first ...
			let validator_index = ValidatorIndex(17);
			let approvals = vec![
				IndirectSignedApprovalVoteV2 {
					block_hash: H256::random(),
					candidate_indices: 1.into(),
					validator: validator_index,
					signature: dummy_signature(),
				},
				IndirectSignedApprovalVoteV2 {
					block_hash: H256::random(),
					candidate_indices: 2.into(),
					validator: validator_index,
					signature: dummy_signature(),
				},
			];
			let expected_msg = pezkuwi_node_network_protocol::ValidationProtocols::V3(
				pezkuwi_node_network_protocol::v3::ApprovalDistributionMessage::Approvals(
					approvals.clone(),
				),
			);
			overseer_message(
				&mut overseer,
				ApprovalVotingParallelMessage::NetworkBridgeUpdate(
					pezkuwi_node_subsystem::messages::NetworkBridgeEvent::PeerMessage(
						PeerId::random(),
						expected_msg.clone(),
					),
				),
			)
			.await;
			// ... then a view change, which should overtake the peer message.
			overseer_message(
				&mut overseer,
				ApprovalVotingParallelMessage::NetworkBridgeUpdate(
					pezkuwi_node_subsystem::messages::NetworkBridgeEvent::PeerViewChange(
						PeerId::random(),
						View::default(),
					),
				),
			)
			.await;
			// Every worker sees the view change first; the peer message then
			// only reaches the worker owning this validator index.
			for (index, rx_approval_distribution_worker) in
				rx_approval_distribution_workers.iter_mut().enumerate()
			{
				assert_matches!(rx_approval_distribution_worker.next().await.unwrap(),
					FromOrchestra::Communication {
						msg: ApprovalDistributionMessage::NetworkBridgeUpdate(
							pezkuwi_node_subsystem::messages::NetworkBridgeEvent::PeerViewChange(
								_,
								_,
							),
						)
					} => {
					}
				);
				if index == validator_index.0 as usize % num_approval_distro_workers {
					assert_matches!(rx_approval_distribution_worker.next().await.unwrap(),
						FromOrchestra::Communication {
							msg: ApprovalDistributionMessage::NetworkBridgeUpdate(
								pezkuwi_node_subsystem::messages::NetworkBridgeEvent::PeerMessage(
									_,
									msg,
								),
							)
						} => {
							assert_eq!(msg, expected_msg);
						}
					);
				} else {
					assert!(rx_approval_distribution_worker
						.next()
						.timeout(Duration::from_millis(200))
						.await
						.is_none());
				}
			}
			assert!(approval_voting_work_provider
				.recv()
				.timeout(Duration::from_millis(200))
				.await
				.is_none());
			overseer_signal(&mut overseer, OverseerSignal::Conclude).await;
			overseer
		},
	)
}
// Test validator_index_for_msg with empty messages.
#[test]
fn test_validator_index_with_empty_message() {
	// An empty assignments message has no originating validator: expect no
	// single-validator bucket and an empty per-validator split.
	let empty_assignments = pezkuwi_node_network_protocol::ValidationProtocols::V3(
		pezkuwi_node_network_protocol::v3::ApprovalDistributionMessage::Assignments(Vec::new()),
	);
	assert_eq!(validator_index_for_msg(empty_assignments), (None, Some(vec![])));
	// Same expectation for an empty approvals message.
	let empty_approvals = pezkuwi_node_network_protocol::ValidationProtocols::V3(
		pezkuwi_node_network_protocol::v3::ApprovalDistributionMessage::Approvals(Vec::new()),
	);
	assert_eq!(validator_index_for_msg(empty_approvals), (None, Some(vec![])));
}
// Test validator_index_for_msg when all the messages are originating from the same validator.
#[test]
fn test_validator_index_with_all_messages_from_the_same_validator() {
	let validator_index = ValidatorIndex(3);
	// Two assignments from one validator: expect a single bucket keyed by that
	// validator, carrying the original message untouched.
	let assignments = vec![
		(
			fake_assignment_cert_v2(H256::random(), validator_index, CoreIndex(1).into()),
			1.into(),
		),
		(
			fake_assignment_cert_v2(H256::random(), validator_index, CoreIndex(3).into()),
			3.into(),
		),
	];
	let v3_assignment = pezkuwi_node_network_protocol::ValidationProtocols::V3(
		pezkuwi_node_network_protocol::v3::ApprovalDistributionMessage::Assignments(assignments),
	);
	assert_eq!(
		validator_index_for_msg(v3_assignment.clone()),
		(Some((validator_index, v3_assignment)), None)
	);
	// Same expectation for two approvals from one validator.
	let votes = vec![
		IndirectSignedApprovalVoteV2 {
			block_hash: H256::random(),
			candidate_indices: 1.into(),
			validator: validator_index,
			signature: dummy_signature(),
		},
		IndirectSignedApprovalVoteV2 {
			block_hash: H256::random(),
			candidate_indices: 1.into(),
			validator: validator_index,
			signature: dummy_signature(),
		},
	];
	let v3_approval = pezkuwi_node_network_protocol::ValidationProtocols::V3(
		pezkuwi_node_network_protocol::v3::ApprovalDistributionMessage::Approvals(votes),
	);
	assert_eq!(
		validator_index_for_msg(v3_approval.clone()),
		(Some((validator_index, v3_approval)), None)
	);
}
// Test validator_index_for_msg when all the messages are originating from different validators,
// so the function should split them by validator index, so we can forward them separately to the
// worker they are assigned to.
#[test]
fn test_validator_index_with_messages_from_different_validators() {
	let first_validator_index = ValidatorIndex(3);
	let second_validator_index = ValidatorIndex(4);
	let assignments = vec![
		(
			fake_assignment_cert_v2(H256::random(), first_validator_index, CoreIndex(1).into()),
			1.into(),
		),
		(
			fake_assignment_cert_v2(H256::random(), second_validator_index, CoreIndex(3).into()),
			3.into(),
		),
	];
	let v3_assignment = pezkuwi_node_network_protocol::ValidationProtocols::V3(
		pezkuwi_node_network_protocol::v3::ApprovalDistributionMessage::Assignments(
			assignments.clone(),
		),
	);
	let result = validator_index_for_msg(v3_assignment.clone());
	assert_matches!(result, (None, Some(_)));
	let split_by_validator = result.1.unwrap();
	assert_eq!(split_by_validator.len(), assignments.len());
	// Each split-out message must be keyed by its originating validator and
	// wrap exactly the single assignment that validator sent.
	for ((validator_index, message), original) in
		split_by_validator.into_iter().zip(assignments.iter())
	{
		assert_eq!(validator_index, original.0.validator);
		assert_eq!(
			message,
			pezkuwi_node_network_protocol::ValidationProtocols::V3(
				pezkuwi_node_network_protocol::v3::ApprovalDistributionMessage::Assignments(
					vec![original.clone()],
				),
			)
		);
	}
	let approvals = vec![
		IndirectSignedApprovalVoteV2 {
			block_hash: H256::random(),
			candidate_indices: 1.into(),
			validator: first_validator_index,
			signature: dummy_signature(),
		},
		IndirectSignedApprovalVoteV2 {
			block_hash: H256::random(),
			candidate_indices: 2.into(),
			validator: second_validator_index,
			signature: dummy_signature(),
		},
	];
	let v3_approvals = pezkuwi_node_network_protocol::ValidationProtocols::V3(
		pezkuwi_node_network_protocol::v3::ApprovalDistributionMessage::Approvals(
			approvals.clone(),
		),
	);
	let result = validator_index_for_msg(v3_approvals.clone());
	assert_matches!(result, (None, Some(_)));
	let split_by_validator = result.1.unwrap();
	assert_eq!(split_by_validator.len(), approvals.len());
	// Same invariant for approvals: one single-vote message per validator.
	for ((validator_index, message), original) in
		split_by_validator.into_iter().zip(approvals.iter())
	{
		assert_eq!(validator_index, original.validator);
		assert_eq!(
			message,
			pezkuwi_node_network_protocol::ValidationProtocols::V3(
				pezkuwi_node_network_protocol::v3::ApprovalDistributionMessage::Approvals(
					vec![original.clone()],
				),
			)
		);
	}
}