Split NetworkBridge and break cycles with Unbounded (#2736)

* overseer: pass messages directly between subsystems

* test that message is held on to

* Update node/overseer/src/lib.rs

Co-authored-by: Peter Goodspeed-Niklaus <coriolinus@users.noreply.github.com>

* give every subsystem an unbounded sender too

* remove metered_channel::name

1. we don't provide good names
2. these names are never used anywhere

* unused mut

* remove unnecessary &mut

* subsystem unbounded_send

* remove unused MaybeTimer

We have channel size metrics that serve the same purpose better now and the implementation of message timing was pretty ugly.

* remove comment

* split up senders and receivers

* update metrics

* fix tests

* fix test subsystem context

* use SubsystemSender in jobs system now

* refactor of awful jobs code

* expose public `run` on JobSubsystem

* update candidate backing to new jobs & use unbounded

* bitfield signing

* candidate-selection

* provisioner

* approval voting: send unbounded for assignment/approvals

* async not needed

* begin bridge split

* split up network tasks into background worker

* port over network bridge

* Update node/network/bridge/src/lib.rs

Co-authored-by: Andronik Ordian <write@reusable.software>

* rename ValidationWorkerNotifications

Co-authored-by: Peter Goodspeed-Niklaus <coriolinus@users.noreply.github.com>
Co-authored-by: Andronik Ordian <write@reusable.software>
This commit is contained in:
Robert Habermeier
2021-03-29 01:18:53 +02:00
committed by GitHub
parent 6f464a360f
commit 8ebbe19d10
16 changed files with 1191 additions and 1346 deletions
+146 -145
View File
@@ -35,7 +35,8 @@ use polkadot_node_primitives::{
Statement, SignedFullStatement, ValidationResult,
};
use polkadot_subsystem::{
PerLeafSpan, Stage, jaeger,
PerLeafSpan, Stage, SubsystemSender,
jaeger,
messages::{
AllMessages, AvailabilityDistributionMessage, AvailabilityStoreMessage,
CandidateBackingMessage, CandidateSelectionMessage, CandidateValidationMessage,
@@ -50,8 +51,8 @@ use polkadot_node_subsystem_util::{
request_validators,
request_from_runtime,
Validator,
delegated_subsystem,
FromJobCommand,
JobSender,
metrics::{self, prometheus},
};
use statement_table::{
@@ -68,8 +69,9 @@ use thiserror::Error;
const LOG_TARGET: &str = "parachain::candidate-backing";
/// Errors that can occur in candidate backing.
#[derive(Debug, Error)]
enum Error {
pub enum Error {
#[error("Candidate is not found")]
CandidateNotFound,
#[error("Signature is invalid")]
@@ -142,11 +144,9 @@ impl ValidatedCandidateCommand {
}
/// Holds all data needed for candidate backing job operation.
struct CandidateBackingJob {
pub struct CandidateBackingJob {
/// The hash of the relay parent on top of which this job is doing its work.
parent: Hash,
/// Outbound message channel sending part.
tx_from: mpsc::Sender<FromJobCommand>,
/// The `ParaId` assigned to this validator
assignment: Option<ParaId>,
/// The collator required to author the candidate, if any.
@@ -294,23 +294,20 @@ fn table_attested_to_backed(
}
async fn store_available_data(
tx_from: &mut mpsc::Sender<FromJobCommand>,
sender: &mut JobSender<impl SubsystemSender>,
id: Option<ValidatorIndex>,
n_validators: u32,
candidate_hash: CandidateHash,
available_data: AvailableData,
) -> Result<(), Error> {
let (tx, rx) = oneshot::channel();
tx_from.send(AllMessages::AvailabilityStore(
AvailabilityStoreMessage::StoreAvailableData(
candidate_hash,
id,
n_validators,
available_data,
tx,
)
).into()
).await?;
sender.send_message(AvailabilityStoreMessage::StoreAvailableData(
candidate_hash,
id,
n_validators,
available_data,
tx,
).into()).await;
let _ = rx.await.map_err(Error::StoreAvailableData)?;
@@ -321,9 +318,9 @@ async fn store_available_data(
//
// This will compute the erasure root internally and compare it to the expected erasure root.
// This returns `Err()` iff there is an internal error. Otherwise, it returns either `Ok(Ok(()))` or `Ok(Err(_))`.
#[tracing::instrument(level = "trace", skip(tx_from, pov, span), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(sender, pov, span), fields(subsystem = LOG_TARGET))]
async fn make_pov_available(
tx_from: &mut mpsc::Sender<FromJobCommand>,
sender: &mut JobSender<impl SubsystemSender>,
validator_index: Option<ValidatorIndex>,
n_validators: usize,
pov: Arc<PoV>,
@@ -361,7 +358,7 @@ async fn make_pov_available(
);
store_available_data(
tx_from,
sender,
validator_index,
n_validators as u32,
candidate_hash,
@@ -373,7 +370,7 @@ async fn make_pov_available(
}
async fn request_pov(
tx_from: &mut mpsc::Sender<FromJobCommand>,
sender: &mut JobSender<impl SubsystemSender>,
relay_parent: Hash,
from_validator: ValidatorIndex,
candidate_hash: CandidateHash,
@@ -381,35 +378,33 @@ async fn request_pov(
) -> Result<Arc<PoV>, Error> {
let (tx, rx) = oneshot::channel();
tx_from.send(FromJobCommand::SendMessage(AllMessages::AvailabilityDistribution(
AvailabilityDistributionMessage::FetchPoV {
relay_parent,
from_validator,
candidate_hash,
pov_hash,
tx,
}
))).await?;
sender.send_message(AvailabilityDistributionMessage::FetchPoV {
relay_parent,
from_validator,
candidate_hash,
pov_hash,
tx,
}.into()).await;
let pov = rx.await.map_err(|_| Error::FetchPoV)?;
Ok(Arc::new(pov))
}
async fn request_candidate_validation(
tx_from: &mut mpsc::Sender<FromJobCommand>,
sender: &mut JobSender<impl SubsystemSender>,
candidate: CandidateDescriptor,
pov: Arc<PoV>,
) -> Result<ValidationResult, Error> {
let (tx, rx) = oneshot::channel();
tx_from.send(AllMessages::CandidateValidation(
sender.send_message(AllMessages::CandidateValidation(
CandidateValidationMessage::ValidateFromChainState(
candidate,
pov,
tx,
)
).into()
).await?;
).await;
match rx.await {
Ok(Ok(validation_result)) => Ok(validation_result),
@@ -420,8 +415,8 @@ async fn request_candidate_validation(
type BackgroundValidationResult = Result<(CandidateReceipt, CandidateCommitments, Arc<PoV>), CandidateReceipt>;
struct BackgroundValidationParams<F> {
tx_from: mpsc::Sender<FromJobCommand>,
struct BackgroundValidationParams<S, F> {
sender: JobSender<S>,
tx_command: mpsc::Sender<ValidatedCandidateCommand>,
candidate: CandidateReceipt,
relay_parent: Hash,
@@ -433,10 +428,13 @@ struct BackgroundValidationParams<F> {
}
async fn validate_and_make_available(
params: BackgroundValidationParams<impl Fn(BackgroundValidationResult) -> ValidatedCandidateCommand + Sync>,
params: BackgroundValidationParams<
impl SubsystemSender,
impl Fn(BackgroundValidationResult) -> ValidatedCandidateCommand + Sync,
>
) -> Result<(), Error> {
let BackgroundValidationParams {
mut tx_from,
mut sender,
mut tx_command,
candidate,
relay_parent,
@@ -456,12 +454,12 @@ async fn validate_and_make_available(
} => {
let _span = span.as_ref().map(|s| s.child("request-pov"));
match request_pov(
&mut tx_from,
relay_parent,
from_validator,
candidate_hash,
pov_hash,
).await {
&mut sender,
relay_parent,
from_validator,
candidate_hash,
pov_hash,
).await {
Err(Error::FetchPoV) => {
tx_command.send(ValidatedCandidateCommand::AttestNoPoV(candidate.hash())).await.map_err(Error::Mpsc)?;
return Ok(())
@@ -478,7 +476,7 @@ async fn validate_and_make_available(
.with_pov(&pov)
.with_para_id(candidate.descriptor().para_id)
});
request_candidate_validation(&mut tx_from, candidate.descriptor.clone(), pov.clone()).await?
request_candidate_validation(&mut sender, candidate.descriptor.clone(), pov.clone()).await?
};
let expected_commitments_hash = candidate.commitments_hash;
@@ -502,7 +500,7 @@ async fn validate_and_make_available(
Err(candidate)
} else {
let erasure_valid = make_pov_available(
&mut tx_from,
&mut sender,
validator_index,
n_validators,
pov.clone(),
@@ -544,6 +542,7 @@ impl CandidateBackingJob {
/// Run asynchronously.
async fn run_loop(
mut self,
mut sender: JobSender<impl SubsystemSender>,
mut rx_to: mpsc::Receiver<CandidateBackingMessage>,
span: PerLeafSpan,
) -> Result<(), Error> {
@@ -552,7 +551,7 @@ impl CandidateBackingJob {
validated_command = self.background_validation.next() => {
let _span = span.child("process-validation-result");
if let Some(c) = validated_command {
self.handle_validated_candidate_command(&span, c).await?;
self.handle_validated_candidate_command(&span, &mut sender, c).await?;
} else {
panic!("`self` hasn't dropped and `self` holds a reference to this sender; qed");
}
@@ -563,7 +562,7 @@ impl CandidateBackingJob {
// we intentionally want spans created in `process_msg` to descend from the
// `span` which is longer-lived than this ephemeral timing span.
let _timing_span = span.child("process-message");
self.process_msg(&span, msg).await?;
self.process_msg(&span, &mut sender, msg).await?;
}
}
}
@@ -572,10 +571,11 @@ impl CandidateBackingJob {
Ok(())
}
#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(self, parent_span, sender), fields(subsystem = LOG_TARGET))]
async fn handle_validated_candidate_command(
&mut self,
parent_span: &jaeger::Span,
sender: &mut JobSender<impl SubsystemSender>,
command: ValidatedCandidateCommand,
) -> Result<(), Error> {
let candidate_hash = command.candidate_hash();
@@ -596,15 +596,20 @@ impl CandidateBackingJob {
commitments,
});
if let Some(stmt) = self.sign_import_and_distribute_statement(
sender,
statement,
parent_span,
).await? {
self.issue_candidate_seconded_message(stmt).await?;
sender.send_message(
CandidateSelectionMessage::Seconded(self.parent, stmt).into()
).await;
}
}
}
Err(candidate) => {
self.issue_candidate_invalid_message(candidate).await?;
sender.send_message(
CandidateSelectionMessage::Invalid(self.parent, candidate).into()
).await;
}
}
}
@@ -615,7 +620,7 @@ impl CandidateBackingJob {
if !self.issued_statements.contains(&candidate_hash) {
if res.is_ok() {
let statement = Statement::Valid(candidate_hash);
self.sign_import_and_distribute_statement(statement, &parent_span).await?;
self.sign_import_and_distribute_statement(sender, statement, &parent_span).await?;
}
self.issued_statements.insert(candidate_hash);
}
@@ -627,7 +632,7 @@ impl CandidateBackingJob {
// Ok, another try:
let c_span = span.as_ref().map(|s| s.child("try"));
let attesting = attesting.clone();
self.kick_off_validation_work(attesting, c_span).await?
self.kick_off_validation_work(sender, attesting, c_span).await?
}
} else {
@@ -643,10 +648,12 @@ impl CandidateBackingJob {
Ok(())
}
#[tracing::instrument(level = "trace", skip(self, params), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(self, sender, params), fields(subsystem = LOG_TARGET))]
async fn background_validate_and_make_available(
&mut self,
sender: &mut JobSender<impl SubsystemSender>,
params: BackgroundValidationParams<
impl SubsystemSender,
impl Fn(BackgroundValidationResult) -> ValidatedCandidateCommand + Send + 'static + Sync
>,
) -> Result<(), Error> {
@@ -658,36 +665,19 @@ impl CandidateBackingJob {
tracing::error!(target: LOG_TARGET, "Failed to validate and make available: {:?}", e);
}
};
self.tx_from.send(FromJobCommand::Spawn("Backing Validation", bg.boxed())).await?;
sender.send_command(FromJobCommand::Spawn("Backing Validation", bg.boxed())).await?;
}
Ok(())
}
async fn issue_candidate_invalid_message(
&mut self,
candidate: CandidateReceipt,
) -> Result<(), Error> {
self.tx_from.send(AllMessages::from(CandidateSelectionMessage::Invalid(self.parent, candidate)).into()).await?;
Ok(())
}
async fn issue_candidate_seconded_message(
&mut self,
statement: SignedFullStatement,
) -> Result<(), Error> {
self.tx_from.send(AllMessages::from(CandidateSelectionMessage::Seconded(self.parent, statement)).into()).await?;
Ok(())
}
/// Kick off background validation with intent to second.
#[tracing::instrument(level = "trace", skip(self, parent_span, pov), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(self, parent_span, sender, pov), fields(subsystem = LOG_TARGET))]
async fn validate_and_second(
&mut self,
parent_span: &jaeger::Span,
root_span: &jaeger::Span,
sender: &mut JobSender<impl SubsystemSender>,
candidate: &CandidateReceipt,
pov: Arc<PoV>,
) -> Result<(), Error> {
@@ -695,7 +685,9 @@ impl CandidateBackingJob {
if self.required_collator.as_ref()
.map_or(false, |c| c != &candidate.descriptor().collator)
{
self.issue_candidate_invalid_message(candidate.clone()).await?;
sender.send_message(
CandidateSelectionMessage::Invalid(self.parent, candidate.clone()).into()
).await;
return Ok(());
}
@@ -715,29 +707,36 @@ impl CandidateBackingJob {
"Validate and second candidate",
);
self.background_validate_and_make_available(BackgroundValidationParams {
tx_from: self.tx_from.clone(),
tx_command: self.background_validation_tx.clone(),
candidate: candidate.clone(),
relay_parent: self.parent,
pov: PoVData::Ready(pov),
validator_index: self.table_context.validator.as_ref().map(|v| v.index()),
n_validators: self.table_context.validators.len(),
span,
make_command: ValidatedCandidateCommand::Second,
}).await?;
let bg_sender = sender.clone();
self.background_validate_and_make_available(
sender,
BackgroundValidationParams {
sender: bg_sender,
tx_command: self.background_validation_tx.clone(),
candidate: candidate.clone(),
relay_parent: self.parent,
pov: PoVData::Ready(pov),
validator_index: self.table_context.validator.as_ref().map(|v| v.index()),
n_validators: self.table_context.validators.len(),
span,
make_command: ValidatedCandidateCommand::Second,
}
).await?;
Ok(())
}
async fn sign_import_and_distribute_statement(
&mut self,
sender: &mut JobSender<impl SubsystemSender>,
statement: Statement,
parent_span: &jaeger::Span,
) -> Result<Option<SignedFullStatement>, Error> {
if let Some(signed_statement) = self.sign_statement(statement).await {
self.import_statement(&signed_statement, parent_span).await?;
self.distribute_signed_statement(signed_statement.clone()).await?;
self.import_statement(sender, &signed_statement, parent_span).await?;
let smsg = StatementDistributionMessage::Share(self.parent, signed_statement.clone());
sender.send_unbounded_message(smsg.into());
Ok(Some(signed_statement))
} else {
Ok(None)
@@ -745,26 +744,25 @@ impl CandidateBackingJob {
}
/// Check if there have happened any new misbehaviors and issue necessary messages.
#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
async fn issue_new_misbehaviors(&mut self) -> Result<(), Error> {
#[tracing::instrument(level = "trace", skip(self, sender), fields(subsystem = LOG_TARGET))]
async fn issue_new_misbehaviors(&mut self, sender: &mut JobSender<impl SubsystemSender>) {
// collect the misbehaviors to avoid double mutable self borrow issues
let misbehaviors: Vec<_> = self.table.drain_misbehaviors().collect();
for (validator_id, report) in misbehaviors {
self.send_to_provisioner(
sender.send_message(
ProvisionerMessage::ProvisionableData(
self.parent,
ProvisionableData::MisbehaviorReport(self.parent, validator_id, report)
)
).await?
).into()
).await;
}
Ok(())
}
/// Import a statement into the statement table and return the summary of the import.
#[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(self, sender), fields(subsystem = LOG_TARGET))]
async fn import_statement(
&mut self,
sender: &mut JobSender<impl SubsystemSender>,
statement: &SignedFullStatement,
parent_span: &jaeger::Span,
) -> Result<Option<TableSummary>, Error> {
@@ -807,7 +805,7 @@ impl CandidateBackingJob {
self.parent,
ProvisionableData::BackedCandidate(backed.receipt()),
);
self.send_to_provisioner(message).await?;
sender.send_message(message.into()).await;
span.as_ref().map(|s| s.child("backed"));
span
@@ -821,7 +819,7 @@ impl CandidateBackingJob {
None
};
self.issue_new_misbehaviors().await?;
self.issue_new_misbehaviors(sender).await;
// It is important that the child span is dropped before its parent span (`unbacked_span`)
drop(import_statement_span);
@@ -830,8 +828,13 @@ impl CandidateBackingJob {
Ok(summary)
}
#[tracing::instrument(level = "trace", skip(self, root_span), fields(subsystem = LOG_TARGET))]
async fn process_msg(&mut self, root_span: &jaeger::Span, msg: CandidateBackingMessage) -> Result<(), Error> {
#[tracing::instrument(level = "trace", skip(self, root_span, sender), fields(subsystem = LOG_TARGET))]
async fn process_msg(
&mut self,
root_span: &jaeger::Span,
sender: &mut JobSender<impl SubsystemSender>,
msg: CandidateBackingMessage,
) -> Result<(), Error> {
match msg {
CandidateBackingMessage::Second(relay_parent, candidate, pov) => {
let _timer = self.metrics.time_process_second();
@@ -856,7 +859,7 @@ impl CandidateBackingJob {
if !self.issued_statements.contains(&candidate_hash) {
let pov = Arc::new(pov);
self.validate_and_second(&span, &root_span, &candidate, pov).await?;
self.validate_and_second(&span, &root_span, sender, &candidate, pov).await?;
}
}
}
@@ -868,7 +871,7 @@ impl CandidateBackingJob {
.with_relay_parent(_relay_parent);
self.check_statement_signature(&statement)?;
match self.maybe_validate_and_import(&span, &root_span, statement).await {
match self.maybe_validate_and_import(&span, &root_span, sender, statement).await {
Err(Error::ValidationFailed(_)) => return Ok(()),
Err(e) => return Err(e),
Ok(()) => (),
@@ -893,9 +896,10 @@ impl CandidateBackingJob {
}
/// Kick off validation work and distribute the result as a signed statement.
#[tracing::instrument(level = "trace", skip(self, attesting, span), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(self, sender, attesting, span), fields(subsystem = LOG_TARGET))]
async fn kick_off_validation_work(
&mut self,
sender: &mut JobSender<impl SubsystemSender>,
attesting: AttestingData,
span: Option<jaeger::Span>,
) -> Result<(), Error> {
@@ -925,34 +929,38 @@ impl CandidateBackingJob {
return Ok(());
}
let bg_sender = sender.clone();
let pov = PoVData::FetchFromValidator {
from_validator: attesting.from_validator,
candidate_hash,
pov_hash: attesting.pov_hash,
};
self.background_validate_and_make_available(BackgroundValidationParams {
tx_from: self.tx_from.clone(),
tx_command: self.background_validation_tx.clone(),
candidate: attesting.candidate,
relay_parent: self.parent,
pov,
validator_index: self.table_context.validator.as_ref().map(|v| v.index()),
n_validators: self.table_context.validators.len(),
span,
make_command: ValidatedCandidateCommand::Attest,
}).await
self.background_validate_and_make_available(
sender,
BackgroundValidationParams {
sender: bg_sender,
tx_command: self.background_validation_tx.clone(),
candidate: attesting.candidate,
relay_parent: self.parent,
pov,
validator_index: self.table_context.validator.as_ref().map(|v| v.index()),
n_validators: self.table_context.validators.len(),
span,
make_command: ValidatedCandidateCommand::Attest,
},
).await
}
/// Import the statement and kick off validation work if it is a part of our assignment.
#[tracing::instrument(level = "trace", skip(self, parent_span), fields(subsystem = LOG_TARGET))]
#[tracing::instrument(level = "trace", skip(self, parent_span, root_span, sender), fields(subsystem = LOG_TARGET))]
async fn maybe_validate_and_import(
&mut self,
parent_span: &jaeger::Span,
root_span: &jaeger::Span,
sender: &mut JobSender<impl SubsystemSender>,
statement: SignedFullStatement,
) -> Result<(), Error> {
if let Some(summary) = self.import_statement(&statement, parent_span).await? {
if let Some(summary) = self.import_statement(sender, &statement, parent_span).await? {
if Some(summary.group_id) != self.assignment {
return Ok(())
}
@@ -989,7 +997,7 @@ impl CandidateBackingJob {
attesting.backing.push(statement.validator_index());
return Ok(())
} else {
// No job, so start another try with current validator:
// No job, so start another with current validator:
attesting.from_validator = statement.validator_index();
(attesting.clone(), span.as_ref().map(|s| s.child("try")))
}
@@ -1000,6 +1008,7 @@ impl CandidateBackingJob {
};
self.kick_off_validation_work(
sender,
attesting,
span,
).await?;
@@ -1089,20 +1098,6 @@ impl CandidateBackingJob {
fn remove_unbacked_span(&mut self, hash: &CandidateHash) -> Option<jaeger::Span> {
self.unbacked_candidates.remove(hash)
}
async fn send_to_provisioner(&mut self, msg: ProvisionerMessage) -> Result<(), Error> {
self.tx_from.send(AllMessages::from(msg).into()).await?;
Ok(())
}
async fn distribute_signed_statement(&mut self, s: SignedFullStatement) -> Result<(), Error> {
let smsg = StatementDistributionMessage::Share(self.parent, s);
self.tx_from.send(AllMessages::from(smsg).into()).await?;
Ok(())
}
}
impl util::JobTrait for CandidateBackingJob {
@@ -1113,14 +1108,14 @@ impl util::JobTrait for CandidateBackingJob {
const NAME: &'static str = "CandidateBackingJob";
#[tracing::instrument(skip(span, keystore, metrics, rx_to, tx_from), fields(subsystem = LOG_TARGET))]
fn run(
#[tracing::instrument(skip(span, keystore, metrics, rx_to, sender), fields(subsystem = LOG_TARGET))]
fn run<S: SubsystemSender>(
parent: Hash,
span: Arc<jaeger::Span>,
keystore: SyncCryptoStorePtr,
metrics: Metrics,
rx_to: mpsc::Receiver<Self::ToJob>,
mut tx_from: mpsc::Sender<FromJobCommand>,
mut sender: JobSender<S>,
) -> Pin<Box<dyn Future<Output = Result<(), Self::Error>> + Send>> {
async move {
macro_rules! try_runtime_api {
@@ -1147,14 +1142,14 @@ impl util::JobTrait for CandidateBackingJob {
let _span = span.child("runtime-apis");
let (validators, groups, session_index, cores) = futures::try_join!(
try_runtime_api!(request_validators(parent, &mut tx_from).await),
try_runtime_api!(request_validator_groups(parent, &mut tx_from).await),
try_runtime_api!(request_session_index_for_child(parent, &mut tx_from).await),
try_runtime_api!(request_from_runtime(
request_validators(parent, &mut sender).await,
request_validator_groups(parent, &mut sender).await,
request_session_index_for_child(parent, &mut sender).await,
request_from_runtime(
parent,
&mut tx_from,
&mut sender,
|tx| RuntimeApiRequest::AvailabilityCores(tx),
).await),
).await,
).map_err(Error::JoinMultiple)?;
let validators = try_runtime_api!(validators);
@@ -1231,7 +1226,6 @@ impl util::JobTrait for CandidateBackingJob {
let (background_tx, background_rx) = mpsc::channel(16);
let job = CandidateBackingJob {
parent,
tx_from,
assignment,
required_collator,
issued_statements: HashSet::new(),
@@ -1249,7 +1243,7 @@ impl util::JobTrait for CandidateBackingJob {
};
drop(_span);
job.run_loop(rx_to, span).await
job.run_loop(sender, rx_to, span).await
}.boxed()
}
}
@@ -1345,7 +1339,9 @@ impl metrics::Metrics for Metrics {
}
}
delegated_subsystem!(CandidateBackingJob(SyncCryptoStorePtr, Metrics) <- CandidateBackingMessage as CandidateBackingSubsystem);
/// The candidate backing subsystem.
pub type CandidateBackingSubsystem<Spawner>
= polkadot_node_subsystem_util::JobSubsystem<CandidateBackingJob, Spawner>;
#[cfg(test)]
mod tests {
@@ -1363,6 +1359,7 @@ mod tests {
use sp_keystore::{CryptoStore, SyncCryptoStore};
use statement_table::v1::Misbehavior;
use std::collections::HashMap;
use sp_tracing as _;
fn validator_pubkeys(val_ids: &[Sr25519Keyring]) -> Vec<ValidatorId> {
val_ids.iter().map(|v| v.public().into()).collect()
@@ -1479,7 +1476,11 @@ mod tests {
let (context, virtual_overseer) = polkadot_node_subsystem_test_helpers::make_subsystem_context(pool.clone());
let subsystem = CandidateBackingSubsystem::run(context, keystore, Metrics(None), pool.clone());
let subsystem = CandidateBackingSubsystem::new(
pool.clone(),
keystore,
Metrics(None),
).run(context);
let test_fut = test(TestHarness {
virtual_overseer,
@@ -2670,7 +2671,7 @@ mod tests {
// Test whether we retry on failed PoV fetching.
#[test]
fn retry_works() {
sp_tracing::try_init_simple();
// sp_tracing::try_init_simple();
let test_state = TestState::default();
test_harness(test_state.keystore.clone(), |test_harness| async move {
let TestHarness { mut virtual_overseer } = test_harness;