Fix cycle dispute-coordinator <-> dispute-distribution (#6489)

* First iteration of message sender.

* dyn Fn variant (no cloning)

* Full implementation + Clone, without allocs on `Send`

* Further clarifications/cleanup.

* MessageSender -> NestingSender

* Doc update/clarification.

* dispute-coordinator: Send disputes on startup.

* Some fixes, cleanup.

* Fix whitespace.

* Dispute distribution fixes, cleanup.

* Cargo.lock

* Fix spaces.

* More format fixes.

What is cargo fmt actually doing?

* More fmt fixes.

* Fix nesting sender.

* Fixes.

* Whitespace

* Enable logging.

* Guide update.

* Fmt fixes, typos.

* Remove unused function.

* Simplifications, doc fixes.

* Update roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md

Co-authored-by: Marcin S. <marcin@bytedude.com>

* Fmt + doc example fix.

Co-authored-by: eskimor <eskimor@no-such-url.com>
Co-authored-by: Marcin S. <marcin@bytedude.com>
This commit is contained in:
eskimor
2023-01-10 12:04:05 +01:00
committed by GitHub
parent 44fd95661c
commit cc650fe53d
16 changed files with 778 additions and 642 deletions
@@ -29,6 +29,7 @@ use std::{num::NonZeroUsize, time::Duration};
use futures::{channel::mpsc, FutureExt, StreamExt, TryFutureExt};
use polkadot_node_network_protocol::authority_discovery::AuthorityDiscovery;
use polkadot_node_subsystem_util::nesting_sender::NestingSender;
use sp_keystore::SyncCryptoStorePtr;
use polkadot_node_network_protocol::request_response::{incoming::IncomingRequestReceiver, v1};
@@ -51,33 +52,33 @@ use polkadot_node_subsystem_util::{runtime, runtime::RuntimeInfo};
/// to this subsystem, unknown dispute. This is to make sure we get our vote out, even on
/// restarts.
///
/// The actual work of sending and keeping track of transmission attempts to each validator for a
/// particular dispute are done by [`SendTask`]. The purpose of the `DisputeSender` is to keep
/// track of all ongoing disputes and start and clean up `SendTask`s accordingly.
/// The actual work of sending and keeping track of transmission attempts to each validator for a
/// particular dispute is done by [`SendTask`]. The purpose of the `DisputeSender` is to keep
/// track of all ongoing disputes and start and clean up `SendTask`s accordingly.
mod sender;
use self::sender::{DisputeSender, TaskFinish};
use self::sender::{DisputeSender, DisputeSenderMessage};
/// ## The receiver [`DisputesReceiver`]
/// ## The receiver [`DisputesReceiver`]
///
/// The receiving side is implemented as `DisputesReceiver` and is run as a separate long running task within
/// this subsystem ([`DisputesReceiver::run`]).
/// The receiving side is implemented as `DisputesReceiver` and is run as a separate long running task within
/// this subsystem ([`DisputesReceiver::run`]).
///
/// Conceptually all the receiver has to do, is waiting for incoming requests which are passed in
/// via a dedicated channel and forwarding them to the dispute coordinator via
/// `DisputeCoordinatorMessage::ImportStatements`. Being the interface to the network and untrusted
/// nodes, the reality is not that simple of course. Before importing statements the receiver will
/// batch up imports as well as possible for efficient imports while maintaining timely dispute
/// resolution and handling of spamming validators:
/// Conceptually, all the receiver has to do is wait for incoming requests which are passed in
/// via a dedicated channel and forward them to the dispute coordinator via
/// `DisputeCoordinatorMessage::ImportStatements`. Being the interface to the network and untrusted
/// nodes, the reality is not that simple of course. Before importing statements the receiver will
/// batch up imports as well as possible for efficient imports while maintaining timely dispute
/// resolution and handling of spamming validators:
///
/// - Drop all messages from non validator nodes, for this it requires the [`AuthorityDiscovery`]
/// service.
/// - Drop messages from a node, if it sends at a too high rate.
/// - Filter out duplicate messages (over some period of time).
/// - Drop any obviously invalid votes (invalid signatures for example).
/// - Ban peers whose votes were deemed invalid.
/// - Drop all messages from non validator nodes, for this it requires the [`AuthorityDiscovery`]
/// service.
/// - Drop messages from a node if it sends at too high a rate.
/// - Filter out duplicate messages (over some period of time).
/// - Drop any obviously invalid votes (invalid signatures for example).
/// - Ban peers whose votes were deemed invalid.
///
/// In general dispute-distribution works on limiting the work the dispute-coordinator will have to
/// do, while at the same time making it aware of new disputes as fast as possible.
/// In general dispute-distribution works on limiting the work the dispute-coordinator will have to
/// do, while at the same time making it aware of new disputes as fast as possible.
///
/// For successfully imported votes, we will confirm the receipt of the message back to the sender.
/// This way a received confirmation guarantees that the vote has been stored to disk by the
@@ -87,7 +88,7 @@ use self::receiver::DisputesReceiver;
/// Error and [`Result`] type for this subsystem.
mod error;
use error::{log_error, FatalError, FatalResult, Result};
use error::{log_error, Error, FatalError, FatalResult, Result};
#[cfg(test)]
mod tests;
@@ -118,10 +119,10 @@ pub struct DisputeDistributionSubsystem<AD> {
runtime: RuntimeInfo,
/// Sender for our dispute requests.
disputes_sender: DisputeSender,
disputes_sender: DisputeSender<DisputeSenderMessage>,
/// Receive messages from `SendTask`.
sender_rx: mpsc::Receiver<TaskFinish>,
/// Receive messages from `DisputeSender` background tasks.
sender_rx: mpsc::Receiver<DisputeSenderMessage>,
/// Receiver for incoming requests.
req_receiver: Option<IncomingRequestReceiver<v1::DisputeRequest>>,
@@ -167,7 +168,7 @@ where
session_cache_lru_size: NonZeroUsize::new(DISPUTE_WINDOW.get() as usize)
.expect("Dispute window can not be 0; qed"),
});
let (tx, sender_rx) = mpsc::channel(1);
let (tx, sender_rx) = NestingSender::new_root(1);
let disputes_sender = DisputeSender::new(tx, metrics.clone());
Self {
runtime,
@@ -216,9 +217,16 @@ where
log_error(result, "on FromOrchestra")?;
},
MuxedMessage::Sender(result) => {
self.disputes_sender
.on_task_message(result.ok_or(FatalError::SenderExhausted)?)
.await;
let result = self
.disputes_sender
.on_message(
&mut ctx,
&mut self.runtime,
result.ok_or(FatalError::SenderExhausted)?,
)
.await
.map_err(Error::Sender);
log_error(result, "on_message")?;
},
}
}
@@ -260,14 +268,14 @@ enum MuxedMessage {
/// Messages from other subsystems.
Subsystem(FatalResult<FromOrchestra<DisputeDistributionMessage>>),
/// Messages from spawned sender background tasks.
Sender(Option<TaskFinish>),
Sender(Option<DisputeSenderMessage>),
}
#[overseer::contextbounds(DisputeDistribution, prefix = self::overseer)]
impl MuxedMessage {
async fn receive<Context>(
ctx: &mut Context,
from_sender: &mut mpsc::Receiver<TaskFinish>,
from_sender: &mut mpsc::Receiver<DisputeSenderMessage>,
) -> Self {
// We are only fusing here to make `select` happy, in reality we will quit if the stream
// ends.
@@ -21,19 +21,17 @@ use std::{
time::Duration,
};
use futures::{
channel::{mpsc, oneshot},
future::poll_fn,
Future,
};
use futures::{channel::oneshot, future::poll_fn, Future};
use futures_timer::Delay;
use indexmap::{map::Entry, IndexMap};
use polkadot_node_network_protocol::request_response::v1::DisputeRequest;
use polkadot_node_primitives::{CandidateVotes, DisputeMessage, SignedDisputeStatement};
use polkadot_node_subsystem::{messages::DisputeCoordinatorMessage, overseer, ActiveLeavesUpdate};
use polkadot_node_subsystem_util::runtime::RuntimeInfo;
use polkadot_primitives::v2::{CandidateHash, DisputeStatement, Hash, SessionIndex};
use polkadot_node_primitives::{DisputeMessage, DisputeStatus};
use polkadot_node_subsystem::{
messages::DisputeCoordinatorMessage, overseer, ActiveLeavesUpdate, SubsystemSender,
};
use polkadot_node_subsystem_util::{nesting_sender::NestingSender, runtime::RuntimeInfo};
use polkadot_primitives::v2::{CandidateHash, Hash, SessionIndex};
/// For each ongoing dispute we have a `SendTask` which takes care of it.
///
@@ -53,6 +51,15 @@ pub use error::{Error, FatalError, JfyiError, Result};
use self::error::JfyiErrorResult;
use crate::{Metrics, LOG_TARGET, SEND_RATE_LIMIT};
/// Messages as sent by background tasks.
#[derive(Debug)]
pub enum DisputeSenderMessage {
/// A task finished.
TaskFinish(TaskFinish),
/// A request for active disputes to the dispute-coordinator finished.
ActiveDisputesReady(JfyiErrorResult<Vec<(SessionIndex, CandidateHash, DisputeStatus)>>),
}
/// The `DisputeSender` keeps track of all ongoing disputes we need to send statements out.
///
/// For each dispute a `SendTask` is responsible for sending to the concerned validators for that
@@ -60,7 +67,7 @@ use crate::{Metrics, LOG_TARGET, SEND_RATE_LIMIT};
/// sessions/validator sets and cleans them up when they become obsolete.
///
/// The unit of work for the `DisputeSender` is a dispute, represented by `SendTask`s.
pub struct DisputeSender {
pub struct DisputeSender<M> {
/// All heads we currently consider active.
active_heads: Vec<Hash>,
@@ -72,10 +79,13 @@ pub struct DisputeSender {
/// All ongoing dispute sendings this subsystem is aware of.
///
/// Using an `IndexMap` so items can be iterated in the order of insertion.
disputes: IndexMap<CandidateHash, SendTask>,
disputes: IndexMap<CandidateHash, SendTask<M>>,
/// Sender to be cloned for `SendTask`s.
tx: mpsc::Sender<TaskFinish>,
tx: NestingSender<M, DisputeSenderMessage>,
/// `Some` if we are waiting for a response `DisputeCoordinatorMessage::ActiveDisputes`.
waiting_for_active_disputes: Option<WaitForActiveDisputesState>,
/// Future for delaying too frequent creation of dispute sending tasks.
rate_limit: RateLimit,
@@ -84,15 +94,25 @@ pub struct DisputeSender {
metrics: Metrics,
}
/// State we keep while waiting for active disputes.
///
/// When we send `DisputeCoordinatorMessage::ActiveDisputes`, this is the state we keep while
/// waiting for the response.
struct WaitForActiveDisputesState {
/// Have we seen any new sessions since last refresh?
have_new_sessions: bool,
}
#[overseer::contextbounds(DisputeDistribution, prefix = self::overseer)]
impl DisputeSender {
impl<M: 'static + Send + Sync> DisputeSender<M> {
/// Create a new `DisputeSender` which can be used to start dispute sendings.
pub fn new(tx: mpsc::Sender<TaskFinish>, metrics: Metrics) -> Self {
pub fn new(tx: NestingSender<M, DisputeSenderMessage>, metrics: Metrics) -> Self {
Self {
active_heads: Vec::new(),
active_sessions: HashMap::new(),
disputes: IndexMap::new(),
tx,
waiting_for_active_disputes: None,
rate_limit: RateLimit::new(),
metrics,
}
@@ -122,7 +142,7 @@ impl DisputeSender {
ctx,
runtime,
&self.active_sessions,
self.tx.clone(),
NestingSender::new(self.tx.clone(), DisputeSenderMessage::TaskFinish),
req,
&self.metrics,
)
@@ -133,14 +153,47 @@ impl DisputeSender {
Ok(())
}
/// Receive message from a background task.
pub async fn on_message<Context>(
&mut self,
ctx: &mut Context,
runtime: &mut RuntimeInfo,
msg: DisputeSenderMessage,
) -> Result<()> {
match msg {
DisputeSenderMessage::TaskFinish(msg) => {
let TaskFinish { candidate_hash, receiver, result } = msg;
self.metrics.on_sent_request(result.as_metrics_label());
let task = match self.disputes.get_mut(&candidate_hash) {
None => {
// Can happen when a dispute ends, with messages still in queue:
gum::trace!(
target: LOG_TARGET,
?result,
"Received `FromSendingTask::Finished` for non existing dispute."
);
return Ok(())
},
Some(task) => task,
};
task.on_finished_send(&receiver, result);
},
DisputeSenderMessage::ActiveDisputesReady(result) => {
let state = self.waiting_for_active_disputes.take();
let have_new_sessions = state.map(|s| s.have_new_sessions).unwrap_or(false);
let active_disputes = result?;
self.handle_new_active_disputes(ctx, runtime, active_disputes, have_new_sessions)
.await?;
},
}
Ok(())
}
/// Take care of a change in active leaves.
///
/// - Initiate a retry of failed sends which are still active.
/// - Get new authorities to send messages to.
/// - Get rid of obsolete tasks and disputes.
/// - Get dispute sending started in case we missed one for some reason (e.g. on node startup)
///
/// This function ensures the `SEND_RATE_LIMIT`, therefore it might block.
/// Update our knowledge on sessions and initiate fetching for new active disputes.
pub async fn update_leaves<Context>(
&mut self,
ctx: &mut Context,
@@ -154,14 +207,58 @@ impl DisputeSender {
let have_new_sessions = self.refresh_sessions(ctx, runtime).await?;
let active_disputes = get_active_disputes(ctx).await?;
let unknown_disputes = {
let mut disputes = active_disputes.clone();
disputes.retain(|(_, c)| !self.disputes.contains_key(c));
disputes
};
// Not yet waiting for data, request an update:
match self.waiting_for_active_disputes.take() {
None => {
self.waiting_for_active_disputes =
Some(WaitForActiveDisputesState { have_new_sessions });
let mut sender = ctx.sender().clone();
let mut tx = self.tx.clone();
let active_disputes: HashSet<_> = active_disputes.into_iter().map(|(_, c)| c).collect();
let get_active_disputes_task = async move {
let result = get_active_disputes(&mut sender).await;
let result =
tx.send_message(DisputeSenderMessage::ActiveDisputesReady(result)).await;
if let Err(err) = result {
gum::debug!(
target: LOG_TARGET,
?err,
"Sending `DisputeSenderMessage` from background task failed."
);
}
};
ctx.spawn("get_active_disputes", Box::pin(get_active_disputes_task))
.map_err(FatalError::SpawnTask)?;
},
Some(state) => {
let have_new_sessions = state.have_new_sessions || have_new_sessions;
let new_state = WaitForActiveDisputesState { have_new_sessions };
self.waiting_for_active_disputes = Some(new_state);
gum::debug!(
target: LOG_TARGET,
"Dispute coordinator slow? We are still waiting for data on next active leaves update."
);
},
}
Ok(())
}
/// Handle new active disputes response.
///
/// - Initiate a retry of failed sends which are still active.
/// - Get new authorities to send messages to.
/// - Get rid of obsolete tasks and disputes.
///
/// This function ensures the `SEND_RATE_LIMIT`, therefore it might block.
async fn handle_new_active_disputes<Context>(
&mut self,
ctx: &mut Context,
runtime: &mut RuntimeInfo,
active_disputes: Vec<(SessionIndex, CandidateHash, DisputeStatus)>,
have_new_sessions: bool,
) -> Result<()> {
let active_disputes: HashSet<_> = active_disputes.into_iter().map(|(_, c, _)| c).collect();
// Cleanup obsolete senders (retain keeps order of remaining elements):
self.disputes
@@ -188,165 +285,9 @@ impl DisputeSender {
should_rate_limit = sends_happened && have_new_sessions;
}
}
// This should only be non-empty on startup, but if not - we got you covered.
//
// Initial order will not be maintained in that case, but that should be fine as disputes
// recovered at startup will be relatively "old" anyway and we assume that no more than a
// third of the validators will go offline at any point in time anyway.
for dispute in unknown_disputes {
// Rate limiting handled inside `start_send_for_dispute` (calls `start_sender`).
self.start_send_for_dispute(ctx, runtime, dispute).await?;
}
Ok(())
}
/// Receive message from a sending task.
pub async fn on_task_message(&mut self, msg: TaskFinish) {
let TaskFinish { candidate_hash, receiver, result } = msg;
self.metrics.on_sent_request(result.as_metrics_label());
let task = match self.disputes.get_mut(&candidate_hash) {
None => {
// Can happen when a dispute ends, with messages still in queue:
gum::trace!(
target: LOG_TARGET,
?result,
"Received `FromSendingTask::Finished` for non existing dispute."
);
return
},
Some(task) => task,
};
task.on_finished_send(&receiver, result);
}
/// Call `start_sender` on all passed in disputes.
///
/// Recover necessary votes for building up `DisputeMessage` and start sending for all of them.
async fn start_send_for_dispute<Context>(
&mut self,
ctx: &mut Context,
runtime: &mut RuntimeInfo,
dispute: (SessionIndex, CandidateHash),
) -> Result<()> {
let (session_index, candidate_hash) = dispute;
// A relay chain head is required as context for receiving session info information from runtime and
// storage. We will iterate `active_sessions` to find a suitable head. We assume that there is at
// least one active head which, by `session_index`, is at least as recent as the `dispute` passed in.
// We need to avoid picking an older one from a session that might not yet exist in storage.
// Related to <https://github.com/paritytech/polkadot/issues/4730> .
let ref_head = self
.active_sessions
.iter()
.find_map(|(active_session_index, head_hash)| {
// There might be more than one session index that is at least as recent as the dispute
// so we just pick the first one. Keep in mind we are talking about the session index for the
// child of block identified by `head_hash` and not the session index for the block.
if active_session_index >= &session_index {
Some(head_hash)
} else {
None
}
})
.ok_or(JfyiError::NoActiveHeads)?;
let info = runtime
.get_session_info_by_index(ctx.sender(), *ref_head, session_index)
.await?;
let our_index = match info.validator_info.our_index {
None => {
gum::trace!(
target: LOG_TARGET,
"Not a validator in that session - not starting dispute sending."
);
return Ok(())
},
Some(index) => index,
};
let votes = match get_candidate_votes(ctx, session_index, candidate_hash).await? {
None => {
gum::debug!(
target: LOG_TARGET,
?session_index,
?candidate_hash,
"No votes for active dispute?! - possible, due to race."
);
return Ok(())
},
Some(votes) => votes,
};
let our_valid_vote = votes.valid.raw().get(&our_index);
let our_invalid_vote = votes.invalid.get(&our_index);
let (valid_vote, invalid_vote) = if let Some(our_valid_vote) = our_valid_vote {
// Get some invalid vote as well:
let invalid_vote =
votes.invalid.iter().next().ok_or(JfyiError::MissingVotesFromCoordinator)?;
((&our_index, our_valid_vote), invalid_vote)
} else if let Some(our_invalid_vote) = our_invalid_vote {
// Get some valid vote as well:
let valid_vote =
votes.valid.raw().iter().next().ok_or(JfyiError::MissingVotesFromCoordinator)?;
(valid_vote, (&our_index, our_invalid_vote))
} else {
// There is no vote from us yet - nothing to do.
return Ok(())
};
let (valid_index, (kind, signature)) = valid_vote;
let valid_public = info
.session_info
.validators
.get(*valid_index)
.ok_or(JfyiError::InvalidStatementFromCoordinator)?;
let valid_signed = SignedDisputeStatement::new_checked(
DisputeStatement::Valid(*kind),
candidate_hash,
session_index,
valid_public.clone(),
signature.clone(),
)
.map_err(|()| JfyiError::InvalidStatementFromCoordinator)?;
let (invalid_index, (kind, signature)) = invalid_vote;
let invalid_public = info
.session_info
.validators
.get(*invalid_index)
.ok_or(JfyiError::InvalidValidatorIndexFromCoordinator)?;
let invalid_signed = SignedDisputeStatement::new_checked(
DisputeStatement::Invalid(*kind),
candidate_hash,
session_index,
invalid_public.clone(),
signature.clone(),
)
.map_err(|()| JfyiError::InvalidValidatorIndexFromCoordinator)?;
// Reconstructing the checked signed dispute statements is hardly useful here and wasteful,
// but I don't want to enable a bypass for the below smart constructor and this code path
// is supposed to be only hit on startup basically.
//
// Revisit this decision when the `from_signed_statements` is unneeded for the normal code
// path as well.
let message = DisputeMessage::from_signed_statements(
valid_signed,
*valid_index,
invalid_signed,
*invalid_index,
votes.candidate_receipt,
&info.session_info,
)
.map_err(JfyiError::InvalidDisputeFromCoordinator)?;
// Finally, get the party started:
self.start_sender(ctx, runtime, message).await
}
/// Make active sessions correspond to currently active heads.
///
/// Returns: true if sessions changed.
@@ -431,33 +372,14 @@ async fn get_active_session_indices<Context>(
}
/// Retrieve Set of active disputes from the dispute coordinator.
#[overseer::contextbounds(DisputeDistribution, prefix = self::overseer)]
async fn get_active_disputes<Context>(
ctx: &mut Context,
) -> JfyiErrorResult<Vec<(SessionIndex, CandidateHash)>> {
async fn get_active_disputes<Sender>(
sender: &mut Sender,
) -> JfyiErrorResult<Vec<(SessionIndex, CandidateHash, DisputeStatus)>>
where
Sender: SubsystemSender<DisputeCoordinatorMessage>,
{
let (tx, rx) = oneshot::channel();
// Caller scope is in `update_leaves` and this is bounded by fork count.
ctx.send_unbounded_message(DisputeCoordinatorMessage::ActiveDisputes(tx));
rx.await
.map_err(|_| JfyiError::AskActiveDisputesCanceled)
.map(|disputes| disputes.into_iter().map(|d| (d.0, d.1)).collect())
}
/// Get all locally available dispute votes for a given dispute.
#[overseer::contextbounds(DisputeDistribution, prefix = self::overseer)]
async fn get_candidate_votes<Context>(
ctx: &mut Context,
session_index: SessionIndex,
candidate_hash: CandidateHash,
) -> JfyiErrorResult<Option<CandidateVotes>> {
let (tx, rx) = oneshot::channel();
// Caller scope is in `update_leaves` and this is bounded by fork count.
ctx.send_unbounded_message(DisputeCoordinatorMessage::QueryCandidateVotes(
vec![(session_index, candidate_hash)],
tx,
));
rx.await
.map(|v| v.get(0).map(|inner| inner.to_owned().2))
.map_err(|_| JfyiError::AskCandidateVotesCanceled)
sender.send_message(DisputeCoordinatorMessage::ActiveDisputes(tx)).await;
rx.await.map_err(|_| JfyiError::AskActiveDisputesCanceled)
}
@@ -16,7 +16,7 @@
use std::collections::{HashMap, HashSet};
use futures::{channel::mpsc, future::RemoteHandle, Future, FutureExt, SinkExt};
use futures::{future::RemoteHandle, Future, FutureExt};
use polkadot_node_network_protocol::{
request_response::{
@@ -27,7 +27,7 @@ use polkadot_node_network_protocol::{
IfDisconnected,
};
use polkadot_node_subsystem::{messages::NetworkBridgeTxMessage, overseer};
use polkadot_node_subsystem_util::{metrics, runtime::RuntimeInfo};
use polkadot_node_subsystem_util::{metrics, nesting_sender::NestingSender, runtime::RuntimeInfo};
use polkadot_primitives::v2::{
AuthorityDiscoveryId, CandidateHash, Hash, SessionIndex, ValidatorIndex,
};
@@ -44,7 +44,7 @@ use crate::{
/// Keeps track of all the validators that have to be reached for a dispute.
///
/// The unit of work for a `SendTask` is an authority/validator.
pub struct SendTask {
pub struct SendTask<M> {
/// The request we are supposed to get out to all `parachain` validators of the dispute's session
/// and to all current authorities.
request: DisputeRequest,
@@ -58,7 +58,7 @@ pub struct SendTask {
has_failed_sends: bool,
/// Sender to be cloned for tasks.
tx: mpsc::Sender<TaskFinish>,
tx: NestingSender<M, TaskFinish>,
}
/// Status of a particular vote/statement delivery to a particular validator.
@@ -100,7 +100,7 @@ impl TaskResult {
}
#[overseer::contextbounds(DisputeDistribution, prefix = self::overseer)]
impl SendTask {
impl<M: 'static + Send + Sync> SendTask<M> {
/// Initiates sending a dispute message to peers.
///
/// Creation of new `SendTask`s is subject to rate limiting. As each `SendTask` will trigger
@@ -110,7 +110,7 @@ impl SendTask {
ctx: &mut Context,
runtime: &mut RuntimeInfo,
active_sessions: &HashMap<SessionIndex, Hash>,
tx: mpsc::Sender<TaskFinish>,
tx: NestingSender<M, TaskFinish>,
request: DisputeRequest,
metrics: &Metrics,
) -> Result<Self> {
@@ -272,9 +272,9 @@ impl SendTask {
///
/// And spawn tasks for handling the response.
#[overseer::contextbounds(DisputeDistribution, prefix = self::overseer)]
async fn send_requests<Context>(
async fn send_requests<Context, M: 'static + Send + Sync>(
ctx: &mut Context,
tx: mpsc::Sender<TaskFinish>,
tx: NestingSender<M, TaskFinish>,
receivers: Vec<AuthorityDiscoveryId>,
req: DisputeRequest,
metrics: &Metrics,
@@ -307,11 +307,11 @@ async fn send_requests<Context>(
}
/// Future to be spawned in a task for awaiting a response.
async fn wait_response_task(
async fn wait_response_task<M: 'static + Send + Sync>(
pending_response: impl Future<Output = OutgoingResult<DisputeResponse>>,
candidate_hash: CandidateHash,
receiver: AuthorityDiscoveryId,
mut tx: mpsc::Sender<TaskFinish>,
mut tx: NestingSender<M, TaskFinish>,
_timer: Option<metrics::prometheus::prometheus::HistogramTimer>,
) {
let result = pending_response.await;
@@ -320,7 +320,7 @@ async fn wait_response_task(
Ok(DisputeResponse::Confirmed) =>
TaskFinish { candidate_hash, receiver, result: TaskResult::Succeeded },
};
if let Err(err) = tx.feed(msg).await {
if let Err(err) = tx.send_message(msg).await {
gum::debug!(
target: LOG_TARGET,
%err,
@@ -45,7 +45,7 @@ use polkadot_node_network_protocol::{
request_response::{v1::DisputeResponse, Recipient, Requests},
IfDisconnected,
};
use polkadot_node_primitives::{CandidateVotes, DisputeStatus, UncheckedDisputeMessage};
use polkadot_node_primitives::DisputeStatus;
use polkadot_node_subsystem::{
messages::{
AllMessages, DisputeCoordinatorMessage, DisputeDistributionMessage, ImportStatementsResult,
@@ -479,65 +479,6 @@ fn receive_rate_limit_is_enforced() {
test_harness(test);
}
#[test]
fn disputes_are_recovered_at_startup() {
let test = |mut handle: TestSubsystemContextHandle<DisputeDistributionMessage>, _| async move {
let relay_parent = Hash::random();
let candidate = make_candidate_receipt(relay_parent);
let _ = handle_subsystem_startup(&mut handle, Some(candidate.hash())).await;
let message = make_dispute_message(candidate.clone(), ALICE_INDEX, FERDIE_INDEX).await;
// Requests needed session info:
assert_matches!(
handle.recv().await,
AllMessages::DisputeCoordinator(
DisputeCoordinatorMessage::QueryCandidateVotes(
query,
tx,
)
) => {
let (session_index, candidate_hash) = query.get(0).unwrap().clone();
assert_eq!(session_index, MOCK_SESSION_INDEX);
assert_eq!(candidate_hash, candidate.hash());
let unchecked: UncheckedDisputeMessage = message.into();
tx.send(vec![(session_index, candidate_hash, CandidateVotes {
candidate_receipt: candidate,
valid: [(
unchecked.valid_vote.validator_index,
(unchecked.valid_vote.kind,
unchecked.valid_vote.signature
),
)].into_iter().collect(),
invalid: [(
unchecked.invalid_vote.validator_index,
(
unchecked.invalid_vote.kind,
unchecked.invalid_vote.signature
),
)].into_iter().collect(),
})])
.expect("Receiver should stay alive.");
}
);
let expected_receivers = {
let info = &MOCK_SESSION_INFO;
info.discovery_keys
.clone()
.into_iter()
.filter(|a| a != &Sr25519Keyring::Ferdie.public().into())
.collect()
// All validators are also authorities in the first session, so we are
// done here.
};
check_sent_requests(&mut handle, expected_receivers, true).await;
conclude(&mut handle).await;
};
test_harness(test);
}
#[test]
fn send_dispute_gets_cleaned_up() {
let test = |mut handle: TestSubsystemContextHandle<DisputeDistributionMessage>, _| async move {
@@ -605,6 +546,7 @@ fn send_dispute_gets_cleaned_up() {
#[test]
fn dispute_retries_and_works_across_session_boundaries() {
sp_tracing::try_init_simple();
let test = |mut handle: TestSubsystemContextHandle<DisputeDistributionMessage>, _| async move {
let old_head = handle_subsystem_startup(&mut handle, None).await;