Mirror of https://github.com/pezkuwichain/pezkuwi-subxt.git (synced 2026-05-01 08:57:56 +00:00)

refactor+feat: allow subsystems to send only declared messages, generate graphviz (#5314)

Closes #3774. Closes #3826.

Commit 511891dcce, parent 26340b9054, committed via GitHub.
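Two mechanical changes repeat throughout the diff below: every sender becomes typed over a generated per-subsystem "outgoing messages" enum rather than the global `AllMessages`, and the hand-written `SubsystemContext` bounds move into `#[overseer::subsystem(...)]` and `#[overseer::contextbounds(...)]` attributes. As orientation, here is a minimal, self-contained Rust analogue of the declared-messages idea. `DeclaredSender` and `CollationGenOutgoing` are illustrative names only, not the API this commit generates:

    // Messages a hypothetical subsystem declares it may send.
    enum CollatorProtocolMessage { DistributeCollation(u32) }
    enum RuntimeApiMessage { Request(u64) }

    // Per-subsystem outgoing enum: one variant per declared destination.
    // The real overseer macro generates this, plus the `From` impls.
    enum CollationGenOutgoing {
        CollatorProtocol(CollatorProtocolMessage),
        RuntimeApi(RuntimeApiMessage),
    }

    impl From<CollatorProtocolMessage> for CollationGenOutgoing {
        fn from(m: CollatorProtocolMessage) -> Self { Self::CollatorProtocol(m) }
    }
    impl From<RuntimeApiMessage> for CollationGenOutgoing {
        fn from(m: RuntimeApiMessage) -> Self { Self::RuntimeApi(m) }
    }

    // A sender parameterized by the messages it may carry: sending anything
    // the subsystem did not declare simply fails to type-check.
    struct DeclaredSender<M>(std::marker::PhantomData<M>);

    impl<M> DeclaredSender<M> {
        fn send_message(&mut self, m: impl Into<M>) {
            let _routed: M = m.into(); // the real sender forwards to the overseer
        }
    }

    fn main() {
        let mut sender = DeclaredSender::<CollationGenOutgoing>(std::marker::PhantomData);
        // No `AllMessages::CollatorProtocol(..)` wrapping at the call site:
        sender.send_message(CollatorProtocolMessage::DistributeCollation(0));
        // A message type not convertible into `CollationGenOutgoing` would not compile.
    }

Declaring each subsystem's outgoing set is also what the graphviz half of the title builds on: with the declarations in hand, the proc macro knows the complete subsystem-to-subsystem message graph and can emit it (the Cargo.lock hunk below adds `petgraph` to the overseer-gen proc-macro crate for exactly this).
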
@@ -7028,10 +7028,14 @@ version = "0.9.19"
 dependencies = [
  "assert_matches",
  "expander 0.0.6",
+ "petgraph",
+ "polkadot-overseer-gen",
  "proc-macro-crate",
  "proc-macro2",
  "quote",
  "syn",
+ "thiserror",
+ "tracing-gum",
 ]

 [[package]]
@@ -7400,6 +7404,7 @@ dependencies = [
  "polkadot-node-primitives",
  "polkadot-node-subsystem",
  "polkadot-node-subsystem-test-helpers",
  "polkadot-node-subsystem-types",
  "polkadot-node-subsystem-util",
  "polkadot-overseer",
  "polkadot-parachain",

@@ -16,6 +16,7 @@ Best/MS
 BlockId
 BlockNumber
 BridgeStorage
 clonable
 CLI/MS
 Chain1
 Chain2
@@ -177,6 +178,7 @@ plancks
 polkadot/MS
 pov-block/MS
 precommit
 proc-macro/MS
 prometheus
 proxying
 provisioner/MS

@@ -60,7 +60,7 @@ pub type ChainId = u32;
 /// A hash of some data used by the relay chain.
 pub type Hash = sp_core::H256;

-/// Unit type wrapper around [`Hash`] that represents a candidate hash.
+/// Unit type wrapper around [`type@Hash`] that represents a candidate hash.
 ///
 /// This type is produced by [`CandidateReceipt::hash`].
 ///

@@ -22,9 +22,9 @@ use futures::{channel::mpsc, future::FutureExt, join, select, sink::SinkExt, str
 use parity_scale_codec::Encode;
 use polkadot_node_primitives::{AvailableData, CollationGenerationConfig, PoV};
 use polkadot_node_subsystem::{
-    messages::{AllMessages, CollationGenerationMessage, CollatorProtocolMessage},
+    messages::{CollationGenerationMessage, CollatorProtocolMessage},
     overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext,
-    SubsystemError, SubsystemResult, SubsystemSender,
+    SubsystemError, SubsystemResult,
 };
 use polkadot_node_subsystem_util::{
     request_availability_cores, request_persisted_validation_data, request_validation_code,
@@ -54,6 +54,7 @@ pub struct CollationGenerationSubsystem {
     metrics: Metrics,
 }

+#[overseer::contextbounds(CollationGeneration, prefix = self::overseer)]
 impl CollationGenerationSubsystem {
     /// Create a new instance of the `CollationGenerationSubsystem`.
     pub fn new(metrics: Metrics) -> Self {
@@ -71,11 +72,7 @@ impl CollationGenerationSubsystem {
     ///
     /// If `err_tx` is not `None`, errors are forwarded onto that channel as they occur.
     /// Otherwise, most are logged and then discarded.
-    async fn run<Context>(mut self, mut ctx: Context)
-    where
-        Context: SubsystemContext<Message = CollationGenerationMessage>,
-        Context: overseer::SubsystemContext<Message = CollationGenerationMessage>,
-    {
+    async fn run<Context>(mut self, mut ctx: Context) {
         // when we activate new leaves, we spawn a bunch of sub-tasks, each of which is
         // expected to generate precisely one message. We don't want to block the main loop
         // at any point waiting for them all, so instead, we create a channel on which they can
@@ -108,12 +105,8 @@ impl CollationGenerationSubsystem {
         &mut self,
         incoming: SubsystemResult<FromOverseer<<Context as SubsystemContext>::Message>>,
         ctx: &mut Context,
-        sender: &mpsc::Sender<AllMessages>,
-    ) -> bool
-    where
-        Context: SubsystemContext<Message = CollationGenerationMessage>,
-        Context: overseer::SubsystemContext<Message = CollationGenerationMessage>,
-    {
+        sender: &mpsc::Sender<overseer::CollationGenerationOutgoingMessages>,
+    ) -> bool {
         match incoming {
             Ok(FromOverseer::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate {
                 activated,
@@ -162,11 +155,8 @@ impl CollationGenerationSubsystem {
     }
 }

-impl<Context> overseer::Subsystem<Context, SubsystemError> for CollationGenerationSubsystem
-where
-    Context: SubsystemContext<Message = CollationGenerationMessage>,
-    Context: overseer::SubsystemContext<Message = CollationGenerationMessage>,
-{
+#[overseer::subsystem(CollationGeneration, error=SubsystemError, prefix=self::overseer)]
+impl<Context> CollationGenerationSubsystem {
     fn start(self, ctx: Context) -> SpawnedSubsystem {
         let future = async move {
             self.run(ctx).await;
@@ -178,12 +168,13 @@ where
     }
 }

-async fn handle_new_activations<Context: SubsystemContext>(
+#[overseer::contextbounds(CollationGeneration, prefix = self::overseer)]
+async fn handle_new_activations<Context>(
     config: Arc<CollationGenerationConfig>,
     activated: impl IntoIterator<Item = Hash>,
     ctx: &mut Context,
     metrics: Metrics,
-    sender: &mpsc::Sender<AllMessages>,
+    sender: &mpsc::Sender<overseer::CollationGenerationOutgoingMessages>,
 ) -> crate::error::Result<()> {
     // follow the procedure from the guide:
     // https://w3f.github.io/parachain-implementers-guide/node/collators/collation-generation.html
@@ -393,9 +384,10 @@ async fn handle_new_activations<Context: SubsystemContext>(
             metrics.on_collation_generated();

             if let Err(err) = task_sender
-                .send(AllMessages::CollatorProtocol(
-                    CollatorProtocolMessage::DistributeCollation(ccr, pov, result_sender),
-                ))
+                .send(
+                    CollatorProtocolMessage::DistributeCollation(ccr, pov, result_sender)
+                        .into(),
+                )
                 .await
             {
                 gum::warn!(
@@ -417,7 +409,7 @@ async fn obtain_current_validation_code_hash(
     relay_parent: Hash,
     para_id: ParaId,
     assumption: OccupiedCoreAssumption,
-    sender: &mut impl SubsystemSender,
+    sender: &mut impl overseer::CollationGenerationSenderTrait,
 ) -> Result<Option<ValidationCodeHash>, crate::error::Error> {
     use polkadot_node_subsystem::RuntimeApiError;

@@ -296,7 +296,7 @@ mod handle_new_activations {
             *subsystem_sent_messages.lock().await = rx.collect().await;
         });

-        let sent_messages = Arc::try_unwrap(sent_messages)
+        let mut sent_messages = Arc::try_unwrap(sent_messages)
             .expect("subsystem should have shut down by now")
             .into_inner();

@@ -328,7 +328,7 @@ mod handle_new_activations {
         };

         assert_eq!(sent_messages.len(), 1);
-        match &sent_messages[0] {
+        match AllMessages::from(sent_messages.pop().unwrap()) {
             AllMessages::CollatorProtocol(CollatorProtocolMessage::DistributeCollation(
                 CandidateReceipt { descriptor, .. },
                 _pov,
@@ -356,7 +356,7 @@ mod handle_new_activations {
                 expect_descriptor.erasure_root = descriptor.erasure_root.clone();
                 expect_descriptor
             };
-            assert_eq!(descriptor, &expect_descriptor);
+            assert_eq!(descriptor, expect_descriptor);
         },
         _ => panic!("received wrong message type"),
     }
@@ -470,11 +470,13 @@ mod handle_new_activations {

         assert_eq!(sent_messages.len(), 1);
         match &sent_messages[0] {
-            AllMessages::CollatorProtocol(CollatorProtocolMessage::DistributeCollation(
-                CandidateReceipt { descriptor, .. },
-                _pov,
-                ..,
-            )) => {
+            overseer::CollationGenerationOutgoingMessages::CollatorProtocolMessage(
+                CollatorProtocolMessage::DistributeCollation(
+                    CandidateReceipt { descriptor, .. },
+                    _pov,
+                    ..,
+                ),
+            ) => {
                 assert_eq!(expect_validation_code_hash, descriptor.validation_code_hash);
             },
             _ => panic!("received wrong message type"),

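One more pattern repeats in the hunks above and below: the duplicated `where Context: SubsystemContext<...>, Context: overseer::SubsystemContext<...>` clauses disappear, and `#[overseer::contextbounds(<Subsystem>, prefix = self::overseer)]` re-attaches equivalent bounds to the annotated item during macro expansion. A compilable toy analogue of that bound-bundling, with no proc macro involved (all names below are stand-ins):

    // Toy message type standing in for e.g. `ApprovalVotingMessage`.
    enum Message { Tick }

    trait SubsystemContext {
        type Message;
        fn recv(&mut self) -> Self::Message;
    }

    // Bundle the bounds once; the blanket impl makes every qualifying
    // context satisfy the bundled trait automatically. The attribute in
    // this commit effectively injects an equivalent `where` clause.
    trait ApprovalVotingContext: SubsystemContext<Message = Message> {}
    impl<C: SubsystemContext<Message = Message>> ApprovalVotingContext for C {}

    // Before: each fn repeated two `where` lines. After: one short bound.
    fn handle_actions<C: ApprovalVotingContext>(ctx: &mut C) {
        let Message::Tick = ctx.recv();
    }

    struct TestCtx;
    impl SubsystemContext for TestCtx {
        type Message = Message;
        fn recv(&mut self) -> Message { Message::Tick }
    }

    fn main() {
        handle_actions(&mut TestCtx);
    }
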
@@ -38,7 +38,7 @@ use polkadot_node_subsystem::{
         ApprovalDistributionMessage, ChainApiMessage, ChainSelectionMessage, RuntimeApiMessage,
         RuntimeApiRequest,
     },
-    overseer, RuntimeApiError, SubsystemContext, SubsystemError, SubsystemResult,
+    overseer, RuntimeApiError, SubsystemError, SubsystemResult,
 };
 use polkadot_node_subsystem_util::{
     determine_new_blocks,
@@ -107,8 +107,9 @@ enum ImportedBlockInfoError {
 }

 /// Computes information about the imported block. Returns an error if the info couldn't be extracted.
-async fn imported_block_info(
-    ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
+#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)]
+async fn imported_block_info<Context>(
+    ctx: &mut Context,
     env: ImportedBlockInfoEnv<'_>,
     block_hash: Hash,
     block_header: &Header,
@@ -319,10 +320,11 @@ pub struct BlockImportedCandidates {
 /// * and return information about all candidates imported under each block.
 ///
 /// It is the responsibility of the caller to schedule wakeups for each block.
-pub(crate) async fn handle_new_head(
-    ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
+#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)]
+pub(crate) async fn handle_new_head<Context, B: Backend>(
+    ctx: &mut Context,
     state: &mut State,
-    db: &mut OverlayedBackend<'_, impl Backend>,
+    db: &mut OverlayedBackend<'_, B>,
     head: Hash,
     finalized_number: &Option<BlockNumber>,
 ) -> SubsystemResult<Vec<BlockImportedCandidates>> {
@@ -609,7 +611,7 @@ pub(crate) mod tests {
 use assert_matches::assert_matches;
 use merlin::Transcript;
 use polkadot_node_primitives::approval::{VRFOutput, VRFProof};
-use polkadot_node_subsystem::messages::AllMessages;
+use polkadot_node_subsystem::messages::{AllMessages, ApprovalVotingMessage};
 use polkadot_node_subsystem_test_helpers::make_subsystem_context;
 use polkadot_node_subsystem_util::database::Database;
 use polkadot_primitives::v2::{Id as ParaId, SessionInfo, ValidatorIndex};
@@ -724,7 +726,8 @@ pub(crate) mod tests {
     #[test]
     fn imported_block_info_is_good() {
         let pool = TaskExecutor::new();
-        let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone());
+        let (mut ctx, mut handle) =
+            make_subsystem_context::<ApprovalVotingMessage, _>(pool.clone());

         let session = 5;
         let session_info = dummy_session_info(session);
@@ -847,7 +850,8 @@ pub(crate) mod tests {
     #[test]
     fn imported_block_info_fails_if_no_babe_vrf() {
         let pool = TaskExecutor::new();
-        let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone());
+        let (mut ctx, mut handle) =
+            make_subsystem_context::<ApprovalVotingMessage, _>(pool.clone());

         let session = 5;
         let session_info = dummy_session_info(session);
@@ -950,7 +954,8 @@ pub(crate) mod tests {
     #[test]
     fn imported_block_info_fails_if_ancient_session() {
         let pool = TaskExecutor::new();
-        let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone());
+        let (mut ctx, mut handle) =
+            make_subsystem_context::<ApprovalVotingMessage, _>(pool.clone());

         let session = 5;

@@ -1027,7 +1032,7 @@ pub(crate) mod tests {
     #[test]
     fn imported_block_info_extracts_force_approve() {
         let pool = TaskExecutor::new();
-        let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone());
+        let (mut ctx, mut handle) = make_subsystem_context(pool.clone());

         let session = 5;
         let session_info = dummy_session_info(session);
@@ -1158,7 +1163,8 @@ pub(crate) mod tests {
         let mut overlay_db = OverlayedBackend::new(&db);

         let pool = TaskExecutor::new();
-        let (mut ctx, mut handle) = make_subsystem_context::<(), _>(pool.clone());
+        let (mut ctx, mut handle) =
+            make_subsystem_context::<ApprovalVotingMessage, _>(pool.clone());

         let session = 5;
         let irrelevant = 666;

@@ -37,9 +37,8 @@ use polkadot_node_subsystem::{
         ChainSelectionMessage, DisputeCoordinatorMessage, HighestApprovedAncestorBlock,
         RuntimeApiMessage, RuntimeApiRequest,
     },
-    overseer::{self, SubsystemSender as _},
-    FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext, SubsystemError,
-    SubsystemResult, SubsystemSender,
+    overseer, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemError, SubsystemResult,
+    SubsystemSender,
 };
 use polkadot_node_subsystem_util::{
     database::Database,
@@ -355,11 +354,8 @@ impl ApprovalVotingSubsystem {
     }
 }

-impl<Context> overseer::Subsystem<Context, SubsystemError> for ApprovalVotingSubsystem
-where
-    Context: SubsystemContext<Message = ApprovalVotingMessage>,
-    Context: overseer::SubsystemContext<Message = ApprovalVotingMessage>,
-{
+#[overseer::subsystem(ApprovalVoting, error = SubsystemError, prefix = self::overseer)]
+impl<Context: Send> ApprovalVotingSubsystem {
     fn start(self, ctx: Context) -> SpawnedSubsystem {
         let backend = DbBackend::new(self.db.clone(), self.db_config);
         let future = run::<DbBackend, Context>(
@@ -597,27 +593,34 @@ struct State {
     assignment_criteria: Box<dyn AssignmentCriteria + Send + Sync>,
 }

+#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)]
 impl State {
     fn session_info(&self, i: SessionIndex) -> Option<&SessionInfo> {
         self.session_window.as_ref().and_then(|w| w.session_info(i))
     }

     /// Bring `session_window` up to date.
-    pub async fn cache_session_info_for_head(
+    pub async fn cache_session_info_for_head<Context>(
         &mut self,
-        ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
+        ctx: &mut Context,
         head: Hash,
-    ) -> Result<Option<SessionWindowUpdate>, SessionsUnavailable> {
+    ) -> Result<Option<SessionWindowUpdate>, SessionsUnavailable>
+    where
+        <Context as overseer::SubsystemContext>::Sender: Sized + Send,
+    {
         let session_window = self.session_window.take();
         match session_window {
             None => {
+                let sender = ctx.sender().clone();
                 self.session_window =
-                    Some(RollingSessionWindow::new(ctx, APPROVAL_SESSIONS, head).await?);
+                    Some(RollingSessionWindow::new(sender, APPROVAL_SESSIONS, head).await?);
                 Ok(None)
             },
             Some(mut session_window) => {
-                let r =
-                    session_window.cache_session_info_for_head(ctx, head).await.map(Option::Some);
+                let r = session_window
+                    .cache_session_info_for_head(ctx.sender(), head)
+                    .await
+                    .map(Option::Some);
                 self.session_window = Some(session_window);
                 r
             },
@@ -701,6 +704,7 @@ enum Action {
     Conclude,
 }

+#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)]
 async fn run<B, Context>(
     mut ctx: Context,
     mut subsystem: ApprovalVotingSubsystem,
@@ -709,8 +713,6 @@ async fn run<B, Context>(
     mut backend: B,
 ) -> SubsystemResult<()>
 where
-    Context: SubsystemContext<Message = ApprovalVotingMessage>,
-    Context: overseer::SubsystemContext<Message = ApprovalVotingMessage>,
     B: Backend,
 {
     let mut state = State {
@@ -848,9 +850,9 @@ where
 // https://github.com/paritytech/polkadot/issues/3311
 //
 // returns `true` if any of the actions was a `Conclude` command.
-async fn handle_actions(
-    ctx: &mut (impl SubsystemContext<Message = ApprovalVotingMessage>
-        + overseer::SubsystemContext<Message = ApprovalVotingMessage>),
+#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)]
+async fn handle_actions<Context>(
+    ctx: &mut Context,
     state: &mut State,
     overlayed_db: &mut OverlayedBackend<'_, impl Backend>,
     metrics: &Metrics,
@@ -868,7 +870,6 @@ async fn handle_actions(
         Action::ScheduleWakeup { block_hash, block_number, candidate_hash, tick } =>
             wakeups.schedule(block_hash, block_number, candidate_hash, tick),
         Action::IssueApproval(candidate_hash, approval_request) => {
-            let mut sender = ctx.sender().clone();
             // Note that the IssueApproval action will create additional
             // actions that will need to all be processed before we can
             // handle the next action in the set passed to the ambient
@@ -881,7 +882,7 @@ async fn handle_actions(
             // Note that chaining these iterators is O(n) as we must consume
             // the prior iterator.
             let next_actions: Vec<Action> = issue_approval(
-                &mut sender,
+                ctx,
                 state,
                 overlayed_db,
                 metrics,
@@ -1086,9 +1087,9 @@ fn distribution_messages_for_activation(
 }

 // Handle an incoming signal from the overseer. Returns true if execution should conclude.
-async fn handle_from_overseer(
-    ctx: &mut (impl SubsystemContext<Message = ApprovalVotingMessage>
-        + overseer::SubsystemContext<Message = ApprovalVotingMessage>),
+#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)]
+async fn handle_from_overseer<Context>(
+    ctx: &mut Context,
     state: &mut State,
     db: &mut OverlayedBackend<'_, impl Backend>,
     metrics: &Metrics,
@@ -1197,8 +1198,9 @@ async fn handle_from_overseer(
     Ok(actions)
 }

-async fn handle_approved_ancestor(
-    ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
+#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)]
+async fn handle_approved_ancestor<Context>(
+    ctx: &mut Context,
     db: &OverlayedBackend<'_, impl Backend>,
     target: Hash,
     lower_bound: BlockNumber,
@@ -2147,9 +2149,9 @@ fn process_wakeup(
 // Launch approval work, returning an `AbortHandle` which corresponds to the background task
 // spawned. When the background work is no longer needed, the `AbortHandle` should be dropped
 // to cancel the background work and any requests it has spawned.
-async fn launch_approval(
-    ctx: &mut (impl SubsystemContext<Message = ApprovalVotingMessage>
-        + overseer::SubsystemContext<Message = ApprovalVotingMessage>),
+#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)]
+async fn launch_approval<Context>(
+    ctx: &mut Context,
     metrics: Metrics,
     session_index: SessionIndex,
     candidate: CandidateReceipt,
@@ -2242,15 +2244,12 @@ async fn launch_approval(
                 );

                 sender
-                    .send_message(
-                        DisputeCoordinatorMessage::IssueLocalStatement(
-                            session_index,
-                            candidate_hash,
-                            candidate.clone(),
-                            false,
-                        )
-                        .into(),
-                    )
+                    .send_message(DisputeCoordinatorMessage::IssueLocalStatement(
+                        session_index,
+                        candidate_hash,
+                        candidate.clone(),
+                        false,
+                    ))
                     .await;
                 metrics_guard.take().on_approval_invalid();
             },
@@ -2281,17 +2280,14 @@ async fn launch_approval(
         let (val_tx, val_rx) = oneshot::channel();

         sender
-            .send_message(
-                CandidateValidationMessage::ValidateFromExhaustive(
-                    available_data.validation_data,
-                    validation_code,
-                    candidate.clone(),
-                    available_data.pov,
-                    APPROVAL_EXECUTION_TIMEOUT,
-                    val_tx,
-                )
-                .into(),
-            )
+            .send_message(CandidateValidationMessage::ValidateFromExhaustive(
+                available_data.validation_data,
+                validation_code,
+                candidate.clone(),
+                available_data.pov,
+                APPROVAL_EXECUTION_TIMEOUT,
+                val_tx,
+            ))
             .await;

         match val_rx.await {
@@ -2309,15 +2305,12 @@ async fn launch_approval(
                 } else {
                     // Commitments mismatch - issue a dispute.
                     sender
-                        .send_message(
-                            DisputeCoordinatorMessage::IssueLocalStatement(
-                                session_index,
-                                candidate_hash,
-                                candidate.clone(),
-                                false,
-                            )
-                            .into(),
-                        )
+                        .send_message(DisputeCoordinatorMessage::IssueLocalStatement(
+                            session_index,
+                            candidate_hash,
+                            candidate.clone(),
+                            false,
+                        ))
                         .await;

                     metrics_guard.take().on_approval_invalid();
@@ -2334,15 +2327,12 @@ async fn launch_approval(
                 );

                 sender
-                    .send_message(
-                        DisputeCoordinatorMessage::IssueLocalStatement(
-                            session_index,
-                            candidate_hash,
-                            candidate.clone(),
-                            false,
-                        )
-                        .into(),
-                    )
+                    .send_message(DisputeCoordinatorMessage::IssueLocalStatement(
+                        session_index,
+                        candidate_hash,
+                        candidate.clone(),
+                        false,
+                    ))
                     .await;

                 metrics_guard.take().on_approval_invalid();
@@ -2368,8 +2358,9 @@ async fn launch_approval(

 // Issue and import a local approval vote. Should only be invoked after approval checks
 // have been done.
-async fn issue_approval(
-    ctx: &mut impl SubsystemSender,
+#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)]
+async fn issue_approval<Context>(
+    ctx: &mut Context,
     state: &mut State,
     db: &mut OverlayedBackend<'_, impl Backend>,
     metrics: &Metrics,
@@ -2527,15 +2518,14 @@ async fn issue_approval(
     metrics.on_approval_produced();

     // dispatch to approval distribution.
-    ctx.send_unbounded_message(
-        ApprovalDistributionMessage::DistributeApproval(IndirectSignedApprovalVote {
+    ctx.send_unbounded_message(ApprovalDistributionMessage::DistributeApproval(
+        IndirectSignedApprovalVote {
             block_hash,
             candidate_index: candidate_index as _,
             validator: validator_index,
             signature: sig,
-        })
-        .into(),
-    );
+        },
+    ));

     // dispatch to dispute coordinator.
     actions.extend(inform_disputes_action);

@@ -36,8 +36,7 @@ use polkadot_node_primitives::{AvailableData, ErasureChunk};
 use polkadot_node_subsystem::{
     errors::{ChainApiError, RuntimeApiError},
     messages::{AvailabilityStoreMessage, ChainApiMessage},
-    overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext,
-    SubsystemError,
+    overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemError,
 };
 use polkadot_node_subsystem_util as util;
 use polkadot_primitives::v2::{
@@ -519,23 +518,17 @@ impl KnownUnfinalizedBlocks {
     }
 }

-impl<Context> overseer::Subsystem<Context, SubsystemError> for AvailabilityStoreSubsystem
-where
-    Context: SubsystemContext<Message = AvailabilityStoreMessage>,
-    Context: overseer::SubsystemContext<Message = AvailabilityStoreMessage>,
-{
+#[overseer::subsystem(AvailabilityStore, error=SubsystemError, prefix=self::overseer)]
+impl<Context> AvailabilityStoreSubsystem {
     fn start(self, ctx: Context) -> SpawnedSubsystem {
-        let future = run(self, ctx).map(|_| Ok(())).boxed();
+        let future = run::<Context>(self, ctx).map(|_| Ok(())).boxed();

         SpawnedSubsystem { name: "availability-store-subsystem", future }
     }
 }

-async fn run<Context>(mut subsystem: AvailabilityStoreSubsystem, mut ctx: Context)
-where
-    Context: SubsystemContext<Message = AvailabilityStoreMessage>,
-    Context: overseer::SubsystemContext<Message = AvailabilityStoreMessage>,
-{
+#[overseer::contextbounds(AvailabilityStore, prefix = self::overseer)]
+async fn run<Context>(mut subsystem: AvailabilityStoreSubsystem, mut ctx: Context) {
     let mut next_pruning = Delay::new(subsystem.pruning_config.pruning_interval).fuse();

     loop {
@@ -556,15 +549,12 @@ where
     }
 }

+#[overseer::contextbounds(AvailabilityStore, prefix = self::overseer)]
 async fn run_iteration<Context>(
     ctx: &mut Context,
     subsystem: &mut AvailabilityStoreSubsystem,
     mut next_pruning: &mut future::Fuse<Delay>,
-) -> Result<bool, Error>
-where
-    Context: SubsystemContext<Message = AvailabilityStoreMessage>,
-    Context: overseer::SubsystemContext<Message = AvailabilityStoreMessage>,
-{
+) -> Result<bool, Error> {
     select! {
         incoming = ctx.recv().fuse() => {
             match incoming.map_err(|_| Error::ContextChannelClosed)? {
@@ -608,15 +598,12 @@ where
     Ok(false)
 }

+#[overseer::contextbounds(AvailabilityStore, prefix = self::overseer)]
 async fn process_block_activated<Context>(
     ctx: &mut Context,
     subsystem: &mut AvailabilityStoreSubsystem,
     activated: Hash,
-) -> Result<(), Error>
-where
-    Context: SubsystemContext<Message = AvailabilityStoreMessage>,
-    Context: overseer::SubsystemContext<Message = AvailabilityStoreMessage>,
-{
+) -> Result<(), Error> {
     let now = subsystem.clock.now()?;

     let block_header = {
@@ -663,6 +650,7 @@ where
     Ok(())
 }

+#[overseer::contextbounds(AvailabilityStore, prefix = self::overseer)]
 async fn process_new_head<Context>(
     ctx: &mut Context,
     db: &Arc<dyn Database>,
@@ -672,11 +660,7 @@ async fn process_new_head<Context>(
     now: Duration,
     hash: Hash,
     header: Header,
-) -> Result<(), Error>
-where
-    Context: SubsystemContext<Message = AvailabilityStoreMessage>,
-    Context: overseer::SubsystemContext<Message = AvailabilityStoreMessage>,
-{
+) -> Result<(), Error> {
     let candidate_events = util::request_candidate_events(hash, ctx.sender()).await.await??;

     // We need to request the number of validators based on the parent state,
@@ -814,16 +798,13 @@ macro_rules! peek_num {
     };
 }

+#[overseer::contextbounds(AvailabilityStore, prefix = self::overseer)]
 async fn process_block_finalized<Context>(
     ctx: &mut Context,
     subsystem: &AvailabilityStoreSubsystem,
     finalized_hash: Hash,
     finalized_number: BlockNumber,
-) -> Result<(), Error>
-where
-    Context: SubsystemContext<Message = AvailabilityStoreMessage>,
-    Context: overseer::SubsystemContext<Message = AvailabilityStoreMessage>,
-{
+) -> Result<(), Error> {
     let now = subsystem.clock.now()?;

     let mut next_possible_batch = 0;

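In the availability-store tests below, `overseer_send(&mut virtual_overseer, chunk_msg.into()).await` becomes `overseer_send(&mut virtual_overseer, chunk_msg).await`: once the test harness is typed over the subsystem's own message, there is no catch-all enum to convert into. A minimal illustration of why the `.into()` disappears (hypothetical helper, not the real harness):

    // A test helper generic over the message type accepts it directly.
    fn overseer_send<M>(queue: &mut Vec<M>, msg: M) {
        queue.push(msg);
    }

    fn main() {
        let mut sent: Vec<&'static str> = Vec::new();
        overseer_send(&mut sent, "StoreChunk"); // no `.into()` needed
        assert_eq!(sent.len(), 1);
    }
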
@@ -309,13 +309,13 @@ fn store_chunk_works() {
         let chunk_msg =
             AvailabilityStoreMessage::StoreChunk { candidate_hash, chunk: chunk.clone(), tx };

-        overseer_send(&mut virtual_overseer, chunk_msg.into()).await;
+        overseer_send(&mut virtual_overseer, chunk_msg).await;
         assert_eq!(rx.await.unwrap(), Ok(()));

         let (tx, rx) = oneshot::channel();
         let query_chunk = AvailabilityStoreMessage::QueryChunk(candidate_hash, validator_index, tx);

-        overseer_send(&mut virtual_overseer, query_chunk.into()).await;
+        overseer_send(&mut virtual_overseer, query_chunk).await;

         assert_eq!(rx.await.unwrap().unwrap(), chunk);
         virtual_overseer
@@ -341,13 +341,13 @@ fn store_chunk_does_nothing_if_no_entry_already() {
         let chunk_msg =
             AvailabilityStoreMessage::StoreChunk { candidate_hash, chunk: chunk.clone(), tx };

-        overseer_send(&mut virtual_overseer, chunk_msg.into()).await;
+        overseer_send(&mut virtual_overseer, chunk_msg).await;
         assert_eq!(rx.await.unwrap(), Err(()));

         let (tx, rx) = oneshot::channel();
         let query_chunk = AvailabilityStoreMessage::QueryChunk(candidate_hash, validator_index, tx);

-        overseer_send(&mut virtual_overseer, query_chunk.into()).await;
+        overseer_send(&mut virtual_overseer, query_chunk).await;

         assert!(rx.await.unwrap().is_none());
         virtual_overseer

@@ -37,13 +37,12 @@ use polkadot_node_primitives::{
 use polkadot_node_subsystem::{
     jaeger,
     messages::{
-        AllMessages, AvailabilityDistributionMessage, AvailabilityStoreMessage,
-        CandidateBackingMessage, CandidateValidationMessage, CollatorProtocolMessage,
-        DisputeCoordinatorMessage, ProvisionableData, ProvisionerMessage, RuntimeApiRequest,
-        StatementDistributionMessage,
+        AvailabilityDistributionMessage, AvailabilityStoreMessage, CandidateBackingMessage,
+        CandidateValidationMessage, CollatorProtocolMessage, DisputeCoordinatorMessage,
+        ProvisionableData, ProvisionerMessage, RuntimeApiRequest, StatementDistributionMessage,
     },
     overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, PerLeafSpan, SpawnedSubsystem,
-    Stage, SubsystemContext, SubsystemError, SubsystemSender,
+    Stage, SubsystemError,
 };
 use polkadot_node_subsystem_util::{
     self as util, request_from_runtime, request_session_index_for_child, request_validator_groups,
@@ -131,10 +130,10 @@ impl CandidateBackingSubsystem {
     }
 }

-impl<Context> overseer::Subsystem<Context, SubsystemError> for CandidateBackingSubsystem
+#[overseer::subsystem(CandidateBacking, error = SubsystemError, prefix = self::overseer)]
+impl<Context> CandidateBackingSubsystem
 where
-    Context: SubsystemContext<Message = CandidateBackingMessage>,
-    Context: overseer::SubsystemContext<Message = CandidateBackingMessage>,
+    Context: Send + Sync,
 {
     fn start(self, ctx: Context) -> SpawnedSubsystem {
         let future = async move {
@@ -148,15 +147,12 @@ where
     }
 }

+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
 async fn run<Context>(
     mut ctx: Context,
     keystore: SyncCryptoStorePtr,
     metrics: Metrics,
-) -> FatalResult<()>
-where
-    Context: SubsystemContext<Message = CandidateBackingMessage>,
-    Context: overseer::SubsystemContext<Message = CandidateBackingMessage>,
-{
+) -> FatalResult<()> {
     let (background_validation_tx, mut background_validation_rx) = mpsc::channel(16);
     let mut jobs = HashMap::new();

@@ -180,6 +176,7 @@ where
     Ok(())
 }

+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
 async fn run_iteration<Context>(
     ctx: &mut Context,
     keystore: SyncCryptoStorePtr,
@@ -187,11 +184,7 @@ async fn run_iteration<Context>(
     jobs: &mut HashMap<Hash, JobAndSpan<Context>>,
     background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
     background_validation_rx: &mut mpsc::Receiver<(Hash, ValidatedCandidateCommand)>,
-) -> Result<(), Error>
-where
-    Context: SubsystemContext<Message = CandidateBackingMessage>,
-    Context: overseer::SubsystemContext<Message = CandidateBackingMessage>,
-{
+) -> Result<(), Error> {
     loop {
         futures::select!(
             validated_command = background_validation_rx.next().fuse() => {
@@ -225,16 +218,13 @@ where
     }
 }

+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
 async fn handle_validated_candidate_command<Context>(
     ctx: &mut Context,
     jobs: &mut HashMap<Hash, JobAndSpan<Context>>,
     relay_parent: Hash,
     command: ValidatedCandidateCommand,
-) -> Result<(), Error>
-where
-    Context: SubsystemContext<Message = CandidateBackingMessage>,
-    Context: overseer::SubsystemContext<Message = CandidateBackingMessage>,
-{
+) -> Result<(), Error> {
     if let Some(job) = jobs.get_mut(&relay_parent) {
         job.job.handle_validated_candidate_command(&job.span, ctx, command).await?;
     } else {
@@ -245,15 +235,12 @@ where
     Ok(())
 }

+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
 async fn handle_communication<Context>(
     ctx: &mut Context,
     jobs: &mut HashMap<Hash, JobAndSpan<Context>>,
     message: CandidateBackingMessage,
-) -> Result<(), Error>
-where
-    Context: SubsystemContext<Message = CandidateBackingMessage>,
-    Context: overseer::SubsystemContext<Message = CandidateBackingMessage>,
-{
+) -> Result<(), Error> {
     match message {
         CandidateBackingMessage::Second(relay_parent, candidate, pov) => {
             if let Some(job) = jobs.get_mut(&relay_parent) {
@@ -274,6 +261,7 @@ where
     Ok(())
 }

+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
 async fn handle_active_leaves_update<Context>(
     ctx: &mut Context,
     update: ActiveLeavesUpdate,
@@ -281,11 +269,7 @@ async fn handle_active_leaves_update<Context>(
     keystore: &SyncCryptoStorePtr,
     background_validation_tx: &mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
     metrics: &Metrics,
-) -> Result<(), Error>
-where
-    Context: SubsystemContext<Message = CandidateBackingMessage>,
-    Context: overseer::SubsystemContext<Message = CandidateBackingMessage>,
-{
+) -> Result<(), Error> {
     for deactivated in update.deactivated {
         jobs.remove(&deactivated);
     }
@@ -578,22 +562,19 @@ fn table_attested_to_backed(
 }

 async fn store_available_data(
-    sender: &mut impl SubsystemSender,
+    sender: &mut impl overseer::CandidateBackingSenderTrait,
     n_validators: u32,
     candidate_hash: CandidateHash,
     available_data: AvailableData,
 ) -> Result<(), Error> {
     let (tx, rx) = oneshot::channel();
     sender
-        .send_message(
-            AvailabilityStoreMessage::StoreAvailableData {
-                candidate_hash,
-                n_validators,
-                available_data,
-                tx,
-            }
-            .into(),
-        )
+        .send_message(AvailabilityStoreMessage::StoreAvailableData {
+            candidate_hash,
+            n_validators,
+            available_data,
+            tx,
+        })
         .await;

     let _ = rx.await.map_err(Error::StoreAvailableData)?;
@@ -605,8 +586,9 @@ async fn store_available_data(
 //
 // This will compute the erasure root internally and compare it to the expected erasure root.
 // This returns `Err()` iff there is an internal error. Otherwise, it returns either `Ok(Ok(()))` or `Ok(Err(_))`.
+
 async fn make_pov_available(
-    sender: &mut impl SubsystemSender,
+    sender: &mut impl overseer::CandidateBackingSenderTrait,
     n_validators: usize,
     pov: Arc<PoV>,
     candidate_hash: CandidateHash,
@@ -639,7 +621,7 @@ async fn make_pov_available(
 }

 async fn request_pov(
-    sender: &mut impl SubsystemSender,
+    sender: &mut impl overseer::CandidateBackingSenderTrait,
     relay_parent: Hash,
     from_validator: ValidatorIndex,
     candidate_hash: CandidateHash,
@@ -647,16 +629,13 @@ async fn request_pov(
 ) -> Result<Arc<PoV>, Error> {
     let (tx, rx) = oneshot::channel();
     sender
-        .send_message(
-            AvailabilityDistributionMessage::FetchPoV {
-                relay_parent,
-                from_validator,
-                candidate_hash,
-                pov_hash,
-                tx,
-            }
-            .into(),
-        )
+        .send_message(AvailabilityDistributionMessage::FetchPoV {
+            relay_parent,
+            from_validator,
+            candidate_hash,
+            pov_hash,
+            tx,
+        })
         .await;

     let pov = rx.await.map_err(|_| Error::FetchPoV)?;
@@ -664,22 +643,19 @@ async fn request_pov(
 }

 async fn request_candidate_validation(
-    sender: &mut impl SubsystemSender,
+    sender: &mut impl overseer::CandidateBackingSenderTrait,
     candidate_receipt: CandidateReceipt,
     pov: Arc<PoV>,
 ) -> Result<ValidationResult, Error> {
     let (tx, rx) = oneshot::channel();

     sender
-        .send_message(
-            CandidateValidationMessage::ValidateFromChainState(
-                candidate_receipt,
-                pov,
-                BACKING_EXECUTION_TIMEOUT,
-                tx,
-            )
-            .into(),
-        )
+        .send_message(CandidateValidationMessage::ValidateFromChainState(
+            candidate_receipt,
+            pov,
+            BACKING_EXECUTION_TIMEOUT,
+            tx,
+        ))
         .await;

     match rx.await {
@@ -692,7 +668,7 @@ async fn request_candidate_validation(
 type BackgroundValidationResult =
     Result<(CandidateReceipt, CandidateCommitments, Arc<PoV>), CandidateReceipt>;

-struct BackgroundValidationParams<S: overseer::SubsystemSender<AllMessages>, F> {
+struct BackgroundValidationParams<S: overseer::CandidateBackingSenderTrait, F> {
     sender: S,
     tx_command: mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
     candidate: CandidateReceipt,
@@ -705,7 +681,7 @@ struct BackgroundValidationParams<S: overseer::SubsystemSender<AllMessages>, F>

 async fn validate_and_make_available(
     params: BackgroundValidationParams<
-        impl SubsystemSender,
+        impl overseer::CandidateBackingSenderTrait,
         impl Fn(BackgroundValidationResult) -> ValidatedCandidateCommand + Sync,
     >,
 ) -> Result<(), Error> {
@@ -809,11 +785,8 @@ async fn validate_and_make_available(

 struct ValidatorIndexOutOfBounds;

-impl<Context> CandidateBackingJob<Context>
-where
-    Context: SubsystemContext,
-    Context: overseer::SubsystemContext,
-{
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+impl<Context> CandidateBackingJob<Context> {
     async fn handle_validated_candidate_command(
         &mut self,
         root_span: &jaeger::Span,
@@ -896,7 +869,7 @@ where
         &mut self,
         ctx: &mut Context,
         params: BackgroundValidationParams<
-            impl SubsystemSender,
+            impl overseer::CandidateBackingSenderTrait,
             impl Fn(BackgroundValidationResult) -> ValidatedCandidateCommand + Send + 'static + Sync,
         >,
     ) -> Result<(), Error> {
@@ -1001,7 +974,7 @@ where
     }

     /// Check if there have happened any new misbehaviors and issue necessary messages.
-    fn issue_new_misbehaviors(&mut self, ctx: &mut Context) {
+    fn issue_new_misbehaviors(&mut self, sender: &mut impl overseer::CandidateBackingSenderTrait) {
         // collect the misbehaviors to avoid double mutable self borrow issues
         let misbehaviors: Vec<_> = self.table.drain_misbehaviors().collect();
         for (validator_id, report) in misbehaviors {
@@ -1010,7 +983,7 @@ where
             //
             // Misbehaviors are bounded by the number of validators and
             // the block production protocol.
-            ctx.send_unbounded_message(ProvisionerMessage::ProvisionableData(
+            sender.send_unbounded_message(ProvisionerMessage::ProvisionableData(
                 self.parent,
                 ProvisionableData::MisbehaviorReport(self.parent, validator_id, report),
             ));
@@ -1042,7 +1015,7 @@ where
         };

         if let Err(ValidatorIndexOutOfBounds) = self
-            .dispatch_new_statement_to_dispute_coordinator(ctx, candidate_hash, &statement)
+            .dispatch_new_statement_to_dispute_coordinator(ctx.sender(), candidate_hash, &statement)
             .await
         {
             gum::warn!(
@@ -1101,7 +1074,7 @@ where
             None
         };

-        self.issue_new_misbehaviors(ctx);
+        self.issue_new_misbehaviors(ctx.sender());

         // It is important that the child span is dropped before its parent span (`unbacked_span`)
         drop(import_statement_span);
@@ -1123,8 +1096,8 @@ where
     /// the networking component responsible for feeding statements to the backing subsystem
     /// is meant to check the signature and provenance of all statements before submission.
     async fn dispatch_new_statement_to_dispute_coordinator(
-        &mut self,
-        ctx: &mut Context,
+        &self,
+        sender: &mut impl overseer::CandidateBackingSenderTrait,
         candidate_hash: CandidateHash,
         statement: &SignedFullStatement,
     ) -> Result<(), ValidatorIndexOutOfBounds> {
@@ -1157,14 +1130,15 @@ where
         if let (Some(candidate_receipt), Some(dispute_statement)) =
             (maybe_candidate_receipt, maybe_signed_dispute_statement)
         {
-            ctx.send_message(DisputeCoordinatorMessage::ImportStatements {
-                candidate_hash,
-                candidate_receipt,
-                session: self.session_index,
-                statements: vec![(dispute_statement, validator_index)],
-                pending_confirmation: None,
-            })
-            .await;
+            sender
+                .send_message(DisputeCoordinatorMessage::ImportStatements {
+                    candidate_hash,
+                    candidate_receipt,
+                    session: self.session_index,
+                    statements: vec![(dispute_statement, validator_index)],
+                    pending_confirmation: None,
+                })
+                .await;
         }

         Ok(())

@@ -23,7 +23,10 @@ use assert_matches::assert_matches;
 use futures::{future, Future};
 use polkadot_node_primitives::{BlockData, InvalidCandidate};
 use polkadot_node_subsystem::{
-    messages::{CollatorProtocolMessage, RuntimeApiMessage, RuntimeApiRequest, ValidationFailed},
+    messages::{
+        AllMessages, CollatorProtocolMessage, RuntimeApiMessage, RuntimeApiRequest,
+        ValidationFailed,
+    },
     ActivatedLeaf, ActiveLeavesUpdate, FromOverseer, LeafStatus, OverseerSignal,
 };
 use polkadot_node_subsystem_test_helpers as test_helpers;

@@ -34,7 +34,7 @@ use polkadot_node_subsystem::{
         AvailabilityStoreMessage, BitfieldDistributionMessage, BitfieldSigningMessage,
         RuntimeApiMessage, RuntimeApiRequest,
     },
-    ActivatedLeaf, LeafStatus, PerLeafSpan, SubsystemSender,
+    overseer, ActivatedLeaf, LeafStatus, PerLeafSpan, SubsystemSender,
 };
 use polkadot_node_subsystem_util::{self as util, JobSender, JobSubsystem, JobTrait, Validator};
 use polkadot_primitives::v2::{AvailabilityBitfield, CoreState, Hash, ValidatorIndex};
@@ -53,7 +53,7 @@ const JOB_DELAY: Duration = Duration::from_millis(1500);
 const LOG_TARGET: &str = "parachain::bitfield-signing";

 /// Each `BitfieldSigningJob` prepares a signed bitfield for a single relay parent.
-pub struct BitfieldSigningJob;
+pub struct BitfieldSigningJob<Sender>(std::marker::PhantomData<Sender>);

 /// Errors we may encounter in the course of executing the `BitfieldSigningSubsystem`.
 #[derive(Debug, thiserror::Error)]
@@ -83,7 +83,7 @@ pub enum Error {
 async fn get_core_availability(
     core: &CoreState,
     validator_idx: ValidatorIndex,
-    sender: &Mutex<&mut impl SubsystemSender>,
+    sender: &Mutex<&mut impl SubsystemSender<overseer::BitfieldSigningOutgoingMessages>>,
     span: &jaeger::Span,
 ) -> Result<bool, Error> {
     if let &CoreState::Occupied(ref core) = core {
@@ -122,7 +122,7 @@ async fn get_core_availability(
 /// delegates to the v1 runtime API
 async fn get_availability_cores(
     relay_parent: Hash,
-    sender: &mut impl SubsystemSender,
+    sender: &mut impl SubsystemSender<overseer::BitfieldSigningOutgoingMessages>,
 ) -> Result<Vec<CoreState>, Error> {
     let (tx, rx) = oneshot::channel();
     sender
@@ -146,7 +146,7 @@ async fn construct_availability_bitfield(
     relay_parent: Hash,
     span: &jaeger::Span,
     validator_idx: ValidatorIndex,
-    sender: &mut impl SubsystemSender,
+    sender: &mut impl SubsystemSender<overseer::BitfieldSigningOutgoingMessages>,
 ) -> Result<AvailabilityBitfield, Error> {
     // get the set of availability cores from the runtime
     let availability_cores = {
@@ -182,8 +182,13 @@ async fn construct_availability_bitfield(
     Ok(AvailabilityBitfield(core_bits))
 }

-impl JobTrait for BitfieldSigningJob {
+impl<Sender> JobTrait for BitfieldSigningJob<Sender>
+where
+    Sender: overseer::BitfieldSigningSenderTrait + Unpin,
+{
     type ToJob = BitfieldSigningMessage;
+    type OutgoingMessages = overseer::BitfieldSigningOutgoingMessages;
+    type Sender = Sender;
     type Error = Error;
     type RunArgs = SyncCryptoStorePtr;
     type Metrics = Metrics;
@@ -191,12 +196,12 @@ impl JobTrait for BitfieldSigningJob {
     const NAME: &'static str = "bitfield-signing-job";

     /// Run a job for the parent block indicated
-    fn run<S: SubsystemSender>(
+    fn run(
         leaf: ActivatedLeaf,
         keystore: Self::RunArgs,
         metrics: Self::Metrics,
         _receiver: mpsc::Receiver<BitfieldSigningMessage>,
-        mut sender: JobSender<S>,
+        mut sender: JobSender<Sender>,
     ) -> Pin<Box<dyn Future<Output = Result<(), Self::Error>> + Send>> {
         let metrics = metrics.clone();
         async move {
@@ -286,4 +291,5 @@ impl JobTrait for BitfieldSigningJob {
 }

 /// `BitfieldSigningSubsystem` manages a number of bitfield signing jobs.
-pub type BitfieldSigningSubsystem<Spawner> = JobSubsystem<BitfieldSigningJob, Spawner>;
+pub type BitfieldSigningSubsystem<Spawner, Sender> =
+    JobSubsystem<BitfieldSigningJob<Sender>, Spawner>;

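bitfield-signing is built on the job system rather than a raw context, so the same restriction arrives differently: `JobTrait` gains `type Sender` and `type OutgoingMessages`, and the job struct becomes `BitfieldSigningJob<Sender>(PhantomData<Sender>)` so the sender type travels with the job. A compilable toy of that shape (stand-in names, not the real `JobTrait`):

    use std::marker::PhantomData;

    trait Sender<M> {
        fn send(&mut self, msg: M);
    }

    // Illustrative trait in the spirit of `JobTrait` after this change: the
    // job names both the messages it may emit and the sender carrying them.
    trait JobTrait {
        type OutgoingMessages;
        type Sender: Sender<Self::OutgoingMessages>;
        fn run(sender: Self::Sender);
    }

    // Zero-sized marker: `PhantomData` lets the job type mention `S` without
    // storing a sender, mirroring `BitfieldSigningJob<Sender>(PhantomData<Sender>)`.
    struct BitfieldJob<S>(PhantomData<S>);

    impl<S: Sender<u64>> JobTrait for BitfieldJob<S> {
        type OutgoingMessages = u64;
        type Sender = S;
        fn run(mut sender: S) {
            sender.send(42);
        }
    }

    struct VecSender(Vec<u64>);
    impl Sender<u64> for VecSender {
        fn send(&mut self, msg: u64) {
            self.0.push(msg);
        }
    }

    fn main() {
        BitfieldJob::<VecSender>::run(VecSender(Vec::new()));
    }
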
@@ -35,8 +35,8 @@ use polkadot_node_subsystem::{
         CandidateValidationMessage, PreCheckOutcome, RuntimeApiMessage, RuntimeApiRequest,
         ValidationFailed,
     },
-    overseer, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext, SubsystemError,
-    SubsystemResult, SubsystemSender,
+    overseer, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemError, SubsystemResult,
+    SubsystemSender,
 };
 use polkadot_parachain::primitives::{ValidationParams, ValidationResult as WasmValidationResult};
 use polkadot_primitives::v2::{
@@ -93,11 +93,8 @@ impl CandidateValidationSubsystem {
     }
 }

-impl<Context> overseer::Subsystem<Context, SubsystemError> for CandidateValidationSubsystem
-where
-    Context: SubsystemContext<Message = CandidateValidationMessage>,
-    Context: overseer::SubsystemContext<Message = CandidateValidationMessage>,
-{
+#[overseer::subsystem(CandidateValidation, error=SubsystemError, prefix=self::overseer)]
+impl<Context> CandidateValidationSubsystem {
     fn start(self, ctx: Context) -> SpawnedSubsystem {
         let future = run(
             ctx,
@@ -112,17 +109,14 @@ where
     }
 }

+#[overseer::contextbounds(CandidateValidation, prefix = self::overseer)]
 async fn run<Context>(
     mut ctx: Context,
     metrics: Metrics,
     pvf_metrics: polkadot_node_core_pvf::Metrics,
     cache_path: PathBuf,
     program_path: PathBuf,
-) -> SubsystemResult<()>
-where
-    Context: SubsystemContext<Message = CandidateValidationMessage>,
-    Context: overseer::SubsystemContext<Message = CandidateValidationMessage>,
-{
+) -> SubsystemResult<()> {
     let (validation_host, task) = polkadot_node_core_pvf::start(
         polkadot_node_core_pvf::Config::new(cache_path, program_path),
         pvf_metrics,
@@ -235,7 +229,7 @@ async fn runtime_api_request<T, Sender>(
     receiver: oneshot::Receiver<Result<T, RuntimeApiError>>,
 ) -> Result<T, RuntimeRequestFailed>
 where
-    Sender: SubsystemSender,
+    Sender: SubsystemSender<RuntimeApiMessage>,
 {
     sender
         .send_message(RuntimeApiMessage::Request(relay_parent, request).into())
@@ -268,7 +262,7 @@ async fn request_validation_code_by_hash<Sender>(
     validation_code_hash: ValidationCodeHash,
 ) -> Result<Option<ValidationCode>, RuntimeRequestFailed>
 where
-    Sender: SubsystemSender,
+    Sender: SubsystemSender<RuntimeApiMessage>,
 {
     let (tx, rx) = oneshot::channel();
     runtime_api_request(
@@ -287,7 +281,7 @@ async fn precheck_pvf<Sender>(
     validation_code_hash: ValidationCodeHash,
 ) -> PreCheckOutcome
 where
-    Sender: SubsystemSender,
+    Sender: SubsystemSender<RuntimeApiMessage>,
 {
     let validation_code =
         match request_validation_code_by_hash(sender, relay_parent, validation_code_hash).await {
@@ -342,7 +336,7 @@ async fn check_assumption_validation_data<Sender>(
     assumption: OccupiedCoreAssumption,
 ) -> AssumptionCheckOutcome
 where
-    Sender: SubsystemSender,
+    Sender: SubsystemSender<RuntimeApiMessage>,
 {
     let validation_data = {
         let (tx, rx) = oneshot::channel();
@@ -386,7 +380,7 @@ async fn find_assumed_validation_data<Sender>(
     descriptor: &CandidateDescriptor,
 ) -> AssumptionCheckOutcome
 where
-    Sender: SubsystemSender,
+    Sender: SubsystemSender<RuntimeApiMessage>,
 {
     // The candidate descriptor has a `persisted_validation_data_hash` which corresponds to
     // one of up to two possible values that we can derive from the state of the
@@ -421,7 +415,7 @@ pub async fn find_validation_data<Sender>(
     descriptor: &CandidateDescriptor,
 ) -> Result<Option<(PersistedValidationData, ValidationCode)>, ValidationFailed>
 where
-    Sender: SubsystemSender,
+    Sender: SubsystemSender<RuntimeApiMessage>,
 {
     match find_assumed_validation_data(sender, &descriptor).await {
         AssumptionCheckOutcome::Matches(validation_data, validation_code) =>
@@ -446,7 +440,7 @@ async fn validate_from_chain_state<Sender>(
     metrics: &Metrics,
 ) -> Result<ValidationResult, ValidationFailed>
 where
-    Sender: SubsystemSender,
+    Sender: SubsystemSender<RuntimeApiMessage>,
 {
     let mut new_sender = sender.clone();
     let (validation_data, validation_code) =

@@ -39,7 +39,7 @@ use sp_blockchain::HeaderBackend;

 use polkadot_node_subsystem::{
     messages::ChainApiMessage, overseer, FromOverseer, OverseerSignal, SpawnedSubsystem,
-    SubsystemContext, SubsystemError, SubsystemResult,
+    SubsystemError, SubsystemResult,
 };
 use polkadot_primitives::v2::{Block, BlockId};

@@ -64,11 +64,10 @@ impl<Client> ChainApiSubsystem<Client> {
     }
 }

-impl<Client, Context> overseer::Subsystem<Context, SubsystemError> for ChainApiSubsystem<Client>
+#[overseer::subsystem(ChainApi, error = SubsystemError, prefix = self::overseer)]
+impl<Client, Context> ChainApiSubsystem<Client>
 where
     Client: HeaderBackend<Block> + AuxStore + 'static,
-    Context: SubsystemContext<Message = ChainApiMessage>,
-    Context: overseer::SubsystemContext<Message = ChainApiMessage>,
 {
     fn start(self, ctx: Context) -> SpawnedSubsystem {
         let future = run::<Client, Context>(ctx, self)
@@ -78,14 +77,13 @@ where
     }
 }

+#[overseer::contextbounds(ChainApi, prefix = self::overseer)]
 async fn run<Client, Context>(
     mut ctx: Context,
     subsystem: ChainApiSubsystem<Client>,
 ) -> SubsystemResult<()>
 where
     Client: HeaderBackend<Block> + AuxStore,
-    Context: SubsystemContext<Message = ChainApiMessage>,
-    Context: overseer::SubsystemContext<Message = ChainApiMessage>,
 {
     loop {
         match ctx.recv().await? {

@@ -20,7 +20,8 @@ use polkadot_node_primitives::BlockWeight;
|
||||
use polkadot_node_subsystem::{
|
||||
errors::ChainApiError,
|
||||
messages::{ChainApiMessage, ChainSelectionMessage},
|
||||
overseer, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext, SubsystemError,
|
||||
overseer::{self, SubsystemSender},
|
||||
FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemError,
|
||||
};
|
||||
use polkadot_node_subsystem_util::database::Database;
|
||||
use polkadot_primitives::v2::{BlockNumber, ConsensusLog, Hash, Header};
|
||||
@@ -328,11 +329,8 @@ impl ChainSelectionSubsystem {
|
||||
}
|
||||
}
|
||||
|
||||
impl<Context> overseer::Subsystem<Context, SubsystemError> for ChainSelectionSubsystem
|
||||
where
|
||||
Context: SubsystemContext<Message = ChainSelectionMessage>,
|
||||
Context: overseer::SubsystemContext<Message = ChainSelectionMessage>,
|
||||
{
|
||||
#[overseer::subsystem(ChainSelection, error = SubsystemError, prefix = self::overseer)]
|
||||
impl<Context> ChainSelectionSubsystem {
|
||||
fn start(self, ctx: Context) -> SpawnedSubsystem {
|
||||
let backend = db_backend::v1::DbBackend::new(
|
||||
self.db,
|
||||
@@ -348,14 +346,13 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
#[overseer::contextbounds(ChainSelection, prefix = self::overseer)]
|
||||
async fn run<Context, B>(
|
||||
mut ctx: Context,
|
||||
mut backend: B,
|
||||
stagnant_check_interval: StagnantCheckInterval,
|
||||
clock: Box<dyn Clock + Send + Sync>,
|
||||
) where
|
||||
Context: SubsystemContext<Message = ChainSelectionMessage>,
|
||||
Context: overseer::SubsystemContext<Message = ChainSelectionMessage>,
|
||||
B: Backend,
|
||||
{
|
||||
loop {
|
||||
@@ -363,7 +360,7 @@ async fn run<Context, B>(
|
||||
match res {
|
||||
Err(e) => {
|
||||
e.trace();
|
||||
// All errors right now are considered fatal:
|
||||
// All errors are considered fatal right now:
|
||||
break
|
||||
},
|
||||
Ok(()) => {
|
||||
@@ -379,6 +376,7 @@ async fn run<Context, B>(
|
||||
//
|
||||
// A return value of `Ok` indicates that an exit should be made, while non-fatal errors
|
||||
// lead to another call to this function.
|
||||
#[overseer::contextbounds(ChainSelection, prefix = self::overseer)]
|
||||
async fn run_until_error<Context, B>(
|
||||
ctx: &mut Context,
|
||||
backend: &mut B,
|
||||
@@ -386,8 +384,6 @@ async fn run_until_error<Context, B>(
|
||||
clock: &(dyn Clock + Sync),
|
||||
) -> Result<(), Error>
|
||||
where
|
||||
Context: SubsystemContext<Message = ChainSelectionMessage>,
|
||||
Context: overseer::SubsystemContext<Message = ChainSelectionMessage>,
|
||||
B: Backend,
|
||||
{
|
||||
let mut stagnant_check_stream = stagnant_check_interval.timeout_stream();
|
||||
@@ -402,7 +398,7 @@ where
FromOverseer::Signal(OverseerSignal::ActiveLeaves(update)) => {
for leaf in update.activated {
let write_ops = handle_active_leaf(
ctx,
ctx.sender(),
&*backend,
clock.timestamp_now() + STAGNANT_TIMEOUT,
leaf.hash,
@@ -419,7 +415,7 @@ where
handle_approved_block(backend, hash)?
}
ChainSelectionMessage::Leaves(tx) => {
let leaves = load_leaves(ctx, &*backend).await?;
let leaves = load_leaves(ctx.sender(), &*backend).await?;
let _ = tx.send(leaves);
}
ChainSelectionMessage::BestLeafContaining(required, tx) => {
@@ -446,11 +442,11 @@ where
}

async fn fetch_finalized(
ctx: &mut impl SubsystemContext,
sender: &mut impl SubsystemSender<ChainApiMessage>,
) -> Result<Option<(Hash, BlockNumber)>, Error> {
let (number_tx, number_rx) = oneshot::channel();

ctx.send_message(ChainApiMessage::FinalizedBlockNumber(number_tx)).await;
sender.send_message(ChainApiMessage::FinalizedBlockNumber(number_tx)).await;

let number = match number_rx.await? {
Ok(number) => number,
@@ -462,7 +458,7 @@ async fn fetch_finalized(

let (hash_tx, hash_rx) = oneshot::channel();

ctx.send_message(ChainApiMessage::FinalizedBlockHash(number, hash_tx)).await;
sender.send_message(ChainApiMessage::FinalizedBlockHash(number, hash_tx)).await;

match hash_rx.await? {
Err(err) => {
@@ -478,11 +474,11 @@ async fn fetch_finalized(
}

async fn fetch_header(
ctx: &mut impl SubsystemContext,
sender: &mut impl SubsystemSender<ChainApiMessage>,
hash: Hash,
) -> Result<Option<Header>, Error> {
let (tx, rx) = oneshot::channel();
ctx.send_message(ChainApiMessage::BlockHeader(hash, tx)).await;
sender.send_message(ChainApiMessage::BlockHeader(hash, tx)).await;

Ok(rx.await?.unwrap_or_else(|err| {
gum::warn!(target: LOG_TARGET, ?hash, ?err, "Missing hash for finalized block number");
@@ -491,11 +487,11 @@ async fn fetch_header(
}

async fn fetch_block_weight(
ctx: &mut impl SubsystemContext,
sender: &mut impl overseer::SubsystemSender<ChainApiMessage>,
hash: Hash,
) -> Result<Option<BlockWeight>, Error> {
let (tx, rx) = oneshot::channel();
ctx.send_message(ChainApiMessage::BlockWeight(hash, tx)).await;
sender.send_message(ChainApiMessage::BlockWeight(hash, tx)).await;

let res = rx.await?;

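
These helpers now borrow a sender typed by the one message they are declared to emit, instead of the whole context. A minimal self-contained model of the pattern (toy types, not the real polkadot-node-subsystem API):

// Toy message and sender; the real types carry oneshot response channels.
#[derive(Debug, PartialEq)]
enum ChainApiMessage { FinalizedBlockNumber }

trait SubsystemSender<M> {
    fn send_message(&mut self, msg: M);
}

struct RecordingSender(Vec<ChainApiMessage>);

impl SubsystemSender<ChainApiMessage> for RecordingSender {
    fn send_message(&mut self, msg: ChainApiMessage) { self.0.push(msg) }
}

// Only `ChainApiMessage` can leave through this function now; sending an
// undeclared message type no longer compiles.
fn fetch_finalized(sender: &mut impl SubsystemSender<ChainApiMessage>) {
    sender.send_message(ChainApiMessage::FinalizedBlockNumber);
}

fn main() {
    let mut sender = RecordingSender(Vec::new());
    fetch_finalized(&mut sender);
    assert_eq!(sender.0, vec![ChainApiMessage::FinalizedBlockNumber]);
}
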
@@ -507,7 +503,7 @@ async fn fetch_block_weight(

// Handle a new active leaf.
async fn handle_active_leaf(
ctx: &mut impl SubsystemContext,
sender: &mut impl overseer::ChainSelectionSenderTrait,
backend: &impl Backend,
stagnant_at: Timestamp,
hash: Hash,
@@ -519,10 +515,10 @@ async fn handle_active_leaf(
// tree.
l.saturating_sub(1)
},
None => fetch_finalized(ctx).await?.map_or(1, |(_, n)| n),
None => fetch_finalized(sender).await?.map_or(1, |(_, n)| n),
};

let header = match fetch_header(ctx, hash).await? {
let header = match fetch_header(sender, hash).await? {
None => {
gum::warn!(target: LOG_TARGET, ?hash, "Missing header for new head");
return Ok(Vec::new())
@@ -531,7 +527,7 @@ async fn handle_active_leaf(
};

let new_blocks = polkadot_node_subsystem_util::determine_new_blocks(
ctx.sender(),
sender,
|h| backend.load_block_entry(h).map(|b| b.is_some()),
hash,
&header,
@@ -544,7 +540,7 @@ async fn handle_active_leaf(
// determine_new_blocks gives blocks in descending order.
// for this, we want ascending order.
for (hash, header) in new_blocks.into_iter().rev() {
let weight = match fetch_block_weight(ctx, hash).await? {
let weight = match fetch_block_weight(sender, hash).await? {
None => {
gum::warn!(
target: LOG_TARGET,
@@ -654,13 +650,13 @@ fn detect_stagnant(backend: &mut impl Backend, now: Timestamp) -> Result<(), Err
// Load the leaves from the backend. If there are no leaves, then return
// the finalized block.
async fn load_leaves(
ctx: &mut impl SubsystemContext,
sender: &mut impl overseer::SubsystemSender<ChainApiMessage>,
backend: &impl Backend,
) -> Result<Vec<Hash>, Error> {
let leaves: Vec<_> = backend.load_leaves()?.into_hashes_descending().collect();

if leaves.is_empty() {
Ok(fetch_finalized(ctx).await?.map_or(Vec::new(), |(h, _)| vec![h]))
Ok(fetch_finalized(sender).await?.map_or(Vec::new(), |(h, _)| vec![h]))
} else {
Ok(leaves)
}

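
At the call sites the subsystem's main loop keeps the context for receiving and hands out only `ctx.sender()` for sending. A minimal self-contained sketch of that split (toy types; the real context also yields overseer signals and spans):

// Toy context: receiving stays on the context, sending moves to a sender
// handle that helpers borrow.
struct Sender;
impl Sender {
    fn send_message(&mut self, msg: &str) { println!("sent: {msg}") }
}

struct Context { sender: Sender }
impl Context {
    fn recv(&mut self) -> &'static str { "ChainSelectionMessage::Leaves" }
    fn sender(&mut self) -> &mut Sender { &mut self.sender }
}

// The helper only needs the sender, so it no longer borrows the whole context.
fn load_leaves(sender: &mut Sender) { sender.send_message("BlockHeader request") }

fn main() {
    let mut ctx = Context { sender: Sender };
    let _incoming = ctx.recv();
    load_leaves(ctx.sender());
}
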
@@ -34,7 +34,7 @@ use polkadot_node_subsystem::{
BlockDescription, DisputeCoordinatorMessage, DisputeDistributionMessage,
ImportStatementsResult,
},
overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SubsystemContext,
overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOverseer, OverseerSignal,
};
use polkadot_node_subsystem_util::rolling_session_window::{
RollingSessionWindow, SessionWindowUpdate, SessionsUnavailable,
@@ -83,6 +83,7 @@ pub struct Initialized {
error: Option<SessionsUnavailable>,
}

#[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)]
impl Initialized {
/// Make initialized subsystem, ready to `run`.
pub fn new(
@@ -123,8 +124,6 @@ impl Initialized {
clock: Box<dyn Clock>,
) -> FatalResult<()>
where
Context: overseer::SubsystemContext<Message = DisputeCoordinatorMessage>,
Context: SubsystemContext<Message = DisputeCoordinatorMessage>,
B: Backend,
{
loop {
@@ -161,8 +160,6 @@ impl Initialized {
clock: &dyn Clock,
) -> Result<()>
where
Context: overseer::SubsystemContext<Message = DisputeCoordinatorMessage>,
Context: SubsystemContext<Message = DisputeCoordinatorMessage>,
B: Backend,
{
for (priority, request) in participations.drain(..) {
@@ -253,10 +250,9 @@ impl Initialized {
}
}

async fn process_active_leaves_update(
async fn process_active_leaves_update<Context>(
&mut self,
ctx: &mut (impl SubsystemContext<Message = DisputeCoordinatorMessage>
+ overseer::SubsystemContext<Message = DisputeCoordinatorMessage>),
ctx: &mut Context,
overlay_db: &mut OverlayedBackend<'_, impl Backend>,
update: ActiveLeavesUpdate,
now: u64,
@@ -268,7 +264,7 @@ impl Initialized {
if let Some(new_leaf) = update.activated {
match self
.rolling_session_window
.cache_session_info_for_head(ctx, new_leaf.hash)
.cache_session_info_for_head(ctx.sender(), new_leaf.hash)
.await
{
Err(e) => {
@@ -318,10 +314,9 @@ impl Initialized {

/// Scrapes on-chain votes (backing votes and concluded disputes) for an active leaf of the
/// relay chain.
async fn process_on_chain_votes(
async fn process_on_chain_votes<Context>(
&mut self,
ctx: &mut (impl SubsystemContext<Message = DisputeCoordinatorMessage>
+ overseer::SubsystemContext<Message = DisputeCoordinatorMessage>),
ctx: &mut Context,
overlay_db: &mut OverlayedBackend<'_, impl Backend>,
votes: ScrapedOnChainVotes,
now: u64,
@@ -497,9 +492,9 @@ impl Initialized {
Ok(())
}

async fn handle_incoming(
async fn handle_incoming<Context>(
&mut self,
ctx: &mut impl SubsystemContext,
ctx: &mut Context,
overlay_db: &mut OverlayedBackend<'_, impl Backend>,
message: DisputeCoordinatorMessage,
now: Timestamp,
@@ -634,9 +629,9 @@ impl Initialized {
Ok(())
}

async fn handle_import_statements(
async fn handle_import_statements<Context>(
&mut self,
ctx: &mut impl SubsystemContext,
ctx: &mut Context,
overlay_db: &mut OverlayedBackend<'_, impl Backend>,
candidate_hash: CandidateHash,
candidate_receipt: MaybeCandidateReceipt,
@@ -923,9 +918,9 @@ impl Initialized {
Ok(ImportStatementsResult::ValidImport)
}

async fn issue_local_statement(
async fn issue_local_statement<Context>(
&mut self,
ctx: &mut impl SubsystemContext,
ctx: &mut Context,
overlay_db: &mut OverlayedBackend<'_, impl Backend>,
candidate_hash: CandidateHash,
candidate_receipt: CandidateReceipt,
@@ -1054,10 +1049,10 @@ enum MuxedMessage {
Participation(participation::WorkerMessage),
}

#[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)]
impl MuxedMessage {
async fn receive(
ctx: &mut (impl SubsystemContext<Message = DisputeCoordinatorMessage>
+ overseer::SubsystemContext<Message = DisputeCoordinatorMessage>),
async fn receive<Context>(
ctx: &mut Context,
from_sender: &mut participation::WorkerMessageReceiver,
) -> FatalResult<Self> {
// We are only fusing here to make `select` happy, in reality we will quit if the stream

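
With the attribute on the whole `impl` block, each method can take a bare `ctx: &mut Context` and the bound is supplied once. A minimal self-contained model of that shape (toy trait; the bound the proc-macro would inject is written out by hand here):

// Toy stand-in for the context trait the proc-macro would normally wire in.
trait DisputeCoordinatorContextTrait {
    fn recv(&mut self) -> String;
}

struct Initialized;

impl Initialized {
    // Under `#[overseer::contextbounds(DisputeCoordinator, ...)]` this bound
    // is injected automatically into every `<Context>` method.
    fn handle_incoming<Context: DisputeCoordinatorContextTrait>(
        &mut self,
        ctx: &mut Context,
    ) {
        let _msg = ctx.recv();
    }
}

fn main() {
    struct TestCtx;
    impl DisputeCoordinatorContextTrait for TestCtx {
        fn recv(&mut self) -> String { "DisputeCoordinatorMessage".into() }
    }
    Initialized.handle_incoming(&mut TestCtx);
}
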
@@ -32,8 +32,7 @@ use sc_keystore::LocalKeystore;

use polkadot_node_primitives::{CandidateVotes, DISPUTE_WINDOW};
use polkadot_node_subsystem::{
messages::DisputeCoordinatorMessage, overseer, ActivatedLeaf, FromOverseer, OverseerSignal,
SpawnedSubsystem, SubsystemContext, SubsystemError,
overseer, ActivatedLeaf, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemError,
};
use polkadot_node_subsystem_util::{
database::Database, rolling_session_window::RollingSessionWindow,
@@ -124,11 +123,8 @@ impl Config {
}
}

impl<Context> overseer::Subsystem<Context, SubsystemError> for DisputeCoordinatorSubsystem
where
Context: SubsystemContext<Message = DisputeCoordinatorMessage>,
Context: overseer::SubsystemContext<Message = DisputeCoordinatorMessage>,
{
#[overseer::subsystem(DisputeCoordinator, error=SubsystemError, prefix=self::overseer)]
impl<Context: Send> DisputeCoordinatorSubsystem {
fn start(self, ctx: Context) -> SpawnedSubsystem {
let future = async {
let backend = DbBackend::new(self.store.clone(), self.config.column_config());
@@ -142,6 +138,7 @@ where
}
}

#[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)]
impl DisputeCoordinatorSubsystem {
/// Create a new instance of the subsystem.
pub fn new(
@@ -161,8 +158,6 @@ impl DisputeCoordinatorSubsystem {
clock: Box<dyn Clock>,
) -> FatalResult<()>
where
Context: overseer::SubsystemContext<Message = DisputeCoordinatorMessage>,
Context: SubsystemContext<Message = DisputeCoordinatorMessage>,
B: Backend + 'static,
{
let res = self.initialize(&mut ctx, backend, &*clock).await?;
@@ -194,8 +189,6 @@ impl DisputeCoordinatorSubsystem {
)>,
>
where
Context: overseer::SubsystemContext<Message = DisputeCoordinatorMessage>,
Context: SubsystemContext<Message = DisputeCoordinatorMessage>,
B: Backend + 'static,
{
loop {
@@ -260,11 +253,7 @@ impl DisputeCoordinatorSubsystem {
Vec<ScrapedOnChainVotes>,
SpamSlots,
ChainScraper,
)>
where
Context: overseer::SubsystemContext<Message = DisputeCoordinatorMessage>,
Context: SubsystemContext<Message = DisputeCoordinatorMessage>,
{
)> {
// Prune obsolete disputes:
db::v1::note_current_session(overlay_db, rolling_session_window.latest_session())?;

@@ -358,17 +347,15 @@ impl DisputeCoordinatorSubsystem {
}

/// Wait for `ActiveLeavesUpdate` on startup, returns `None` if `Conclude` signal came first.
#[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)]
async fn get_rolling_session_window<Context>(
ctx: &mut Context,
) -> Result<Option<(ActivatedLeaf, RollingSessionWindow)>>
where
Context: overseer::SubsystemContext<Message = DisputeCoordinatorMessage>,
Context: SubsystemContext<Message = DisputeCoordinatorMessage>,
{
if let Some(leaf) = wait_for_first_leaf(ctx).await? {
) -> Result<Option<(ActivatedLeaf, RollingSessionWindow)>> {
if let Some(leaf) = { wait_for_first_leaf(ctx) }.await? {
let sender = ctx.sender().clone();
Ok(Some((
leaf.clone(),
RollingSessionWindow::new(ctx, DISPUTE_WINDOW, leaf.hash)
RollingSessionWindow::new(sender, DISPUTE_WINDOW, leaf.hash)
.await
.map_err(JfyiError::RollingSessionWindow)?,
)))
@@ -378,11 +365,8 @@ where
}

/// Wait for `ActiveLeavesUpdate`, returns `None` if `Conclude` signal came first.
async fn wait_for_first_leaf<Context>(ctx: &mut Context) -> Result<Option<ActivatedLeaf>>
where
Context: overseer::SubsystemContext<Message = DisputeCoordinatorMessage>,
Context: SubsystemContext<Message = DisputeCoordinatorMessage>,
{
#[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)]
async fn wait_for_first_leaf<Context>(ctx: &mut Context) -> Result<Option<ActivatedLeaf>> {
loop {
match ctx.recv().await? {
FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(None),

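
The `#[overseer::subsystem(...)]` attribute turns a plain `impl` block with a `start` method into the `overseer::Subsystem` trait implementation that used to be spelled out by hand. A minimal self-contained model (toy trait; the real expansion is an assumption, with `Send` standing in for the generated context bound):

// Toy stand-ins.
struct SpawnedSubsystem { name: &'static str }
trait Subsystem<Context> {
    fn start(self, ctx: Context) -> SpawnedSubsystem;
}

struct DisputeCoordinatorSubsystem;

// Roughly what the annotated `impl<Context: Send> DisputeCoordinatorSubsystem`
// block becomes after expansion: a trait impl with the generated bound.
impl<Context: Send> Subsystem<Context> for DisputeCoordinatorSubsystem {
    fn start(self, _ctx: Context) -> SpawnedSubsystem {
        SpawnedSubsystem { name: "dispute-coordinator-subsystem" }
    }
}

fn main() {
    let spawned = DisputeCoordinatorSubsystem.start(());
    assert_eq!(spawned.name, "dispute-coordinator-subsystem");
}
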
@@ -28,7 +28,7 @@ use futures_timer::Delay;
use polkadot_node_primitives::{ValidationResult, APPROVAL_EXECUTION_TIMEOUT};
use polkadot_node_subsystem::{
messages::{AvailabilityRecoveryMessage, AvailabilityStoreMessage, CandidateValidationMessage},
ActiveLeavesUpdate, RecoveryError, SubsystemContext, SubsystemSender,
overseer, ActiveLeavesUpdate, RecoveryError,
};
use polkadot_node_subsystem_util::runtime::get_validation_code_by_hash;
use polkadot_primitives::v2::{BlockNumber, CandidateHash, CandidateReceipt, Hash, SessionIndex};
@@ -123,6 +123,7 @@ impl WorkerMessage {
}
}

#[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)]
impl Participation {
/// Get ready for managing dispute participation requests.
///
@@ -144,7 +145,7 @@ impl Participation {
/// `on_active_leaves_update`, the participation will be launched right away.
///
/// Returns: false, if queues are already full.
pub async fn queue_participation<Context: SubsystemContext>(
pub async fn queue_participation<Context>(
&mut self,
ctx: &mut Context,
priority: ParticipationPriority,
@@ -174,7 +175,7 @@ impl Participation {
///
/// Returns: The received `ParticipationStatement` or a fatal error, in case
/// something went wrong when dequeuing more requests (tasks could not be spawned).
pub async fn get_participation_result<Context: SubsystemContext>(
pub async fn get_participation_result<Context>(
&mut self,
ctx: &mut Context,
msg: WorkerMessage,
@@ -190,7 +191,7 @@ impl Participation {
///
/// Make sure we dequeue participations if that became possible and update the most recent
/// block.
pub async fn process_active_leaves_update<Context: SubsystemContext>(
pub async fn process_active_leaves_update<Context>(
&mut self,
ctx: &mut Context,
update: &ActiveLeavesUpdate,
@@ -212,7 +213,8 @@ impl Participation {
}

/// Dequeue until `MAX_PARALLEL_PARTICIPATIONS` is reached.
async fn dequeue_until_capacity<Context: SubsystemContext>(

async fn dequeue_until_capacity<Context>(
&mut self,
ctx: &mut Context,
recent_head: Hash,
@@ -228,7 +230,7 @@ impl Participation {
}

/// Fork a participation task in the background.
fn fork_participation<Context: SubsystemContext>(
fn fork_participation<Context>(
&mut self,
ctx: &mut Context,
req: ParticipationRequest,
@@ -248,7 +250,7 @@ impl Participation {

async fn participate(
mut result_sender: WorkerMessageSender,
mut sender: impl SubsystemSender,
mut sender: impl overseer::DisputeCoordinatorSenderTrait,
block_hash: Hash,
req: ParticipationRequest,
) {
@@ -259,15 +261,12 @@ async fn participate(
// available data
let (recover_available_data_tx, recover_available_data_rx) = oneshot::channel();
sender
.send_message(
AvailabilityRecoveryMessage::RecoverAvailableData(
req.candidate_receipt().clone(),
req.session(),
None,
recover_available_data_tx,
)
.into(),
)
.send_message(AvailabilityRecoveryMessage::RecoverAvailableData(
req.candidate_receipt().clone(),
req.session(),
None,
recover_available_data_tx,
))
.await;

let available_data = match recover_available_data_rx.await {
@@ -326,15 +325,12 @@ async fn participate(
// in the dispute
let (store_available_data_tx, store_available_data_rx) = oneshot::channel();
sender
.send_message(
AvailabilityStoreMessage::StoreAvailableData {
candidate_hash: *req.candidate_hash(),
n_validators: req.n_validators() as u32,
available_data: available_data.clone(),
tx: store_available_data_tx,
}
.into(),
)
.send_message(AvailabilityStoreMessage::StoreAvailableData {
candidate_hash: *req.candidate_hash(),
n_validators: req.n_validators() as u32,
available_data: available_data.clone(),
tx: store_available_data_tx,
})
.await;

match store_available_data_rx.await {
@@ -364,17 +360,14 @@ async fn participate(
// same level of leeway.
let (validation_tx, validation_rx) = oneshot::channel();
sender
.send_message(
CandidateValidationMessage::ValidateFromExhaustive(
available_data.validation_data,
validation_code,
req.candidate_receipt().clone(),
available_data.pov,
APPROVAL_EXECUTION_TIMEOUT,
validation_tx,
)
.into(),
)
.send_message(CandidateValidationMessage::ValidateFromExhaustive(
available_data.validation_data,
validation_code,
req.candidate_receipt().clone(),
available_data.pov,
APPROVAL_EXECUTION_TIMEOUT,
validation_tx,
))
.await;

// we cast votes (either positive or negative) depending on the outcome of

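
The repeated change in this file: with a sender generic over the concrete message type, the `.into()` lift into the global `AllMessages` enum disappears from call sites. A minimal self-contained model of both shapes (toy enums, not the real message set):

// Old shape: everything was lifted into one global enum before sending.
#[derive(Debug)]
enum AllMessages { AvailabilityRecovery(&'static str) }

// A typed message that knows how to lift itself.
struct RecoverAvailableData(&'static str);
impl From<RecoverAvailableData> for AllMessages {
    fn from(m: RecoverAvailableData) -> Self { AllMessages::AvailabilityRecovery(m.0) }
}

fn send_all(msg: AllMessages) { println!("{msg:?}") }

// New shape: the sender is typed, the caller passes the message as-is and
// any conversion happens behind the trait.
fn send_typed<M: Into<AllMessages>>(msg: M) { send_all(msg.into()) }

fn main() {
    // Before: send_all(RecoverAvailableData("pov").into());
    // After, as in the diff: no `.into()` at the call site.
    send_typed(RecoverAvailableData("pov"));
}
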
@@ -20,7 +20,7 @@ use std::{
};

use futures::channel::oneshot;
use polkadot_node_subsystem::{messages::ChainApiMessage, SubsystemSender};
use polkadot_node_subsystem::{messages::ChainApiMessage, overseer};
use polkadot_primitives::v2::{BlockNumber, CandidateHash, CandidateReceipt, Hash, SessionIndex};

use crate::{
@@ -163,7 +163,7 @@ impl Queues {
/// Returns error in case a queue was found full already.
pub async fn queue(
&mut self,
sender: &mut impl SubsystemSender,
sender: &mut impl overseer::DisputeCoordinatorSenderTrait,
priority: ParticipationPriority,
req: ParticipationRequest,
) -> Result<()> {
@@ -305,7 +305,7 @@ impl CandidateComparator {
/// `Ok(None)` in case we could not look up the candidate's relay parent, returns a
/// `FatalError` in case the chain API call fails with an unexpected error.
pub async fn new(
sender: &mut impl SubsystemSender,
sender: &mut impl overseer::DisputeCoordinatorSenderTrait,
candidate: &CandidateReceipt,
) -> FatalResult<Option<Self>> {
let candidate_hash = candidate.hash();
@@ -350,11 +350,11 @@ impl Ord for CandidateComparator {
}

async fn get_block_number(
sender: &mut impl SubsystemSender,
sender: &mut impl overseer::DisputeCoordinatorSenderTrait,
relay_parent: Hash,
) -> FatalResult<Option<BlockNumber>> {
let (tx, rx) = oneshot::channel();
sender.send_message(ChainApiMessage::BlockNumber(relay_parent, tx).into()).await;
sender.send_message(ChainApiMessage::BlockNumber(relay_parent, tx)).await;
rx.await
.map_err(|_| FatalError::ChainApiSenderDropped)?
.map_err(FatalError::ChainApiAncestors)

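
`DisputeCoordinatorSenderTrait` bundles one `SubsystemSender<M>` bound per message the subsystem declares it sends. A minimal self-contained model of such an umbrella trait (toy messages; the real trait is generated from the overseer declaration):

trait SubsystemSender<M> {
    fn send_message(&mut self, msg: M);
}

struct ChainApiMessage;
struct RuntimeApiMessage;

// One bound instead of listing every `SubsystemSender<_>` at each use site.
trait DisputeCoordinatorSenderTrait:
    SubsystemSender<ChainApiMessage> + SubsystemSender<RuntimeApiMessage>
{
}
impl<T> DisputeCoordinatorSenderTrait for T where
    T: SubsystemSender<ChainApiMessage> + SubsystemSender<RuntimeApiMessage>
{
}

fn get_block_number(sender: &mut impl DisputeCoordinatorSenderTrait) {
    sender.send_message(ChainApiMessage);
}

fn main() {
    struct S;
    impl SubsystemSender<ChainApiMessage> for S {
        fn send_message(&mut self, _: ChainApiMessage) {}
    }
    impl SubsystemSender<RuntimeApiMessage> for S {
        fn send_message(&mut self, _: RuntimeApiMessage) {}
    }
    get_block_number(&mut S);
}
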
@@ -53,16 +53,15 @@ pub fn make_our_subsystem_context<S>(
make_subsystem_context(spawn)
}

async fn participate(
ctx: &mut impl SubsystemContext,
participation: &mut Participation,
) -> Result<()> {
#[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)]
async fn participate<Context>(ctx: &mut Context, participation: &mut Participation) -> Result<()> {
let commitments = CandidateCommitments::default();
participate_with_commitments_hash(ctx, participation, commitments.hash()).await
}

async fn participate_with_commitments_hash(
ctx: &mut impl SubsystemContext,
#[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)]
async fn participate_with_commitments_hash<Context>(
ctx: &mut Context,
participation: &mut Participation,
commitments_hash: Hash,
) -> Result<()> {
@@ -81,8 +80,9 @@ async fn participate_with_commitments_hash(
.await
}

async fn activate_leaf(
ctx: &mut impl SubsystemContext,
#[overseer::contextbounds(DisputeCoordinator, prefix = self::overseer)]
async fn activate_leaf<Context>(
ctx: &mut Context,
participation: &mut Participation,
block_number: BlockNumber,
) -> FatalResult<()> {

@@ -21,7 +21,8 @@ use lru::LruCache;

use polkadot_node_primitives::MAX_FINALITY_LAG;
use polkadot_node_subsystem::{
messages::ChainApiMessage, ActivatedLeaf, ActiveLeavesUpdate, ChainApiError, SubsystemSender,
messages::ChainApiMessage, overseer, ActivatedLeaf, ActiveLeavesUpdate, ChainApiError,
SubsystemSender,
};
use polkadot_node_subsystem_util::runtime::{get_candidate_events, get_on_chain_votes};
use polkadot_primitives::v2::{
@@ -81,10 +82,13 @@ impl ChainScraper {
/// Create a properly initialized `OrderingProvider`.
///
/// Returns: `Self` and any scraped votes.
pub async fn new<Sender: SubsystemSender>(
pub async fn new<Sender>(
sender: &mut Sender,
initial_head: ActivatedLeaf,
) -> Result<(Self, Vec<ScrapedOnChainVotes>)> {
) -> Result<(Self, Vec<ScrapedOnChainVotes>)>
where
Sender: overseer::DisputeCoordinatorSenderTrait,
{
let mut s = Self {
included_candidates: HashSet::new(),
candidates_by_block_number: BTreeMap::new(),
@@ -106,11 +110,14 @@ impl ChainScraper {
/// and updates current heads, so we can query candidates for all non-finalized blocks.
///
/// Returns: On-chain votes for the leaf and any ancestors we might not yet have seen.
pub async fn process_active_leaves_update<Sender: SubsystemSender>(
pub async fn process_active_leaves_update<Sender>(
&mut self,
sender: &mut Sender,
update: &ActiveLeavesUpdate,
) -> crate::error::Result<Vec<ScrapedOnChainVotes>> {
) -> crate::error::Result<Vec<ScrapedOnChainVotes>>
where
Sender: overseer::DisputeCoordinatorSenderTrait,
{
let activated = match update.activated.as_ref() {
Some(activated) => activated,
None => return Ok(Vec::new()),
@@ -160,12 +167,15 @@ impl ChainScraper {
/// Process candidate events of a block.
///
/// Keep track of all included candidates.
async fn process_candidate_events(
async fn process_candidate_events<Sender>(
&mut self,
sender: &mut impl SubsystemSender,
sender: &mut Sender,
block_number: BlockNumber,
block_hash: Hash,
) -> Result<()> {
) -> Result<()>
where
Sender: overseer::DisputeCoordinatorSenderTrait,
{
// Get included events:
let included =
get_candidate_events(sender, block_hash)
@@ -196,12 +206,15 @@ impl ChainScraper {
/// either at the block present in cache or at the last finalized block.
///
/// Both `head` and the latest finalized block are **not** included in the result.
async fn get_unfinalized_block_ancestors<Sender: SubsystemSender>(
async fn get_unfinalized_block_ancestors<Sender>(
&mut self,
sender: &mut Sender,
mut head: Hash,
mut head_number: BlockNumber,
) -> Result<Vec<Hash>> {
) -> Result<Vec<Hash>>
where
Sender: overseer::DisputeCoordinatorSenderTrait,
{
let target_ancestor = get_finalized_block_number(sender).await?;

let mut ancestors = Vec::new();
@@ -256,26 +269,29 @@ impl ChainScraper {
}
}

async fn get_finalized_block_number(sender: &mut impl SubsystemSender) -> FatalResult<BlockNumber> {
async fn get_finalized_block_number<Sender>(sender: &mut Sender) -> FatalResult<BlockNumber>
where
Sender: overseer::DisputeCoordinatorSenderTrait,
{
let (number_tx, number_rx) = oneshot::channel();
send_message_fatal(sender, ChainApiMessage::FinalizedBlockNumber(number_tx), number_rx).await
}

async fn get_block_ancestors(
sender: &mut impl SubsystemSender,
async fn get_block_ancestors<Sender>(
sender: &mut Sender,
head: Hash,
num_ancestors: BlockNumber,
) -> FatalResult<Vec<Hash>> {
) -> FatalResult<Vec<Hash>>
where
Sender: overseer::DisputeCoordinatorSenderTrait,
{
let (tx, rx) = oneshot::channel();
sender
.send_message(
ChainApiMessage::Ancestors {
hash: head,
k: num_ancestors as usize,
response_channel: tx,
}
.into(),
)
.send_message(ChainApiMessage::Ancestors {
hash: head,
k: num_ancestors as usize,
response_channel: tx,
})
.await;

rx.await
@@ -289,9 +305,9 @@ async fn send_message_fatal<Sender, Response>(
receiver: oneshot::Receiver<std::result::Result<Response, ChainApiError>>,
) -> FatalResult<Response>
where
Sender: SubsystemSender,
Sender: SubsystemSender<ChainApiMessage>,
{
sender.send_message(message.into()).await;
sender.send_message(message).await;

receiver
.await

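
The request/response shape used throughout these helpers: pack a `oneshot` sender into the outgoing message, then await the receiving half. A minimal self-contained sketch using the `futures` crate (toy message type; error mapping elided):

use futures::channel::oneshot;
use futures::executor::block_on;

// Toy request carrying its response channel, like `ChainApiMessage::BlockNumber`.
struct BlockNumberRequest(oneshot::Sender<u32>);

// Stand-in for the subsystem on the other end of the overseer.
fn serve(req: BlockNumberRequest) {
    let _ = req.0.send(42);
}

async fn get_finalized_block_number() -> Result<u32, oneshot::Canceled> {
    let (tx, rx) = oneshot::channel();
    // In the real code this goes through `sender.send_message(...)`.
    serve(BlockNumberRequest(tx));
    rx.await
}

fn main() {
    assert_eq!(block_on(get_finalized_block_number()), Ok(42));
}
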
@@ -32,7 +32,7 @@ use polkadot_node_subsystem::{
CandidateBackingMessage, ChainApiMessage, DisputeCoordinatorMessage, ProvisionableData,
ProvisionerInherentData, ProvisionerMessage,
},
ActivatedLeaf, LeafStatus, PerLeafSpan, SubsystemSender,
overseer, ActivatedLeaf, LeafStatus, PerLeafSpan,
};
use polkadot_node_subsystem_util::{
request_availability_cores, request_persisted_validation_data, JobSender, JobSubsystem,
@@ -95,8 +95,12 @@ impl InherentAfter {
}
}

/// Provisioner run arguments.
#[derive(Debug, Clone, Copy)]
pub struct ProvisionerConfig;

/// A per-relay-parent job for the provisioning subsystem.
pub struct ProvisionerJob {
pub struct ProvisionerJob<Sender> {
leaf: ActivatedLeaf,
receiver: mpsc::Receiver<ProvisionerMessage>,
backed_candidates: Vec<CandidateReceipt>,
@@ -104,14 +108,16 @@ pub struct ProvisionerJob {
metrics: Metrics,
inherent_after: InherentAfter,
awaiting_inherent: Vec<oneshot::Sender<ProvisionerInherentData>>,
_phantom: std::marker::PhantomData<Sender>,
}

/// Provisioner run arguments.
#[derive(Debug, Clone, Copy)]
pub struct ProvisionerConfig;

impl JobTrait for ProvisionerJob {
impl<Sender> JobTrait for ProvisionerJob<Sender>
where
Sender: overseer::ProvisionerSenderTrait + std::marker::Unpin,
{
type ToJob = ProvisionerMessage;
type OutgoingMessages = overseer::ProvisionerOutgoingMessages;
type Sender = Sender;
type Error = Error;
type RunArgs = ProvisionerConfig;
type Metrics = Metrics;
@@ -121,12 +127,12 @@ impl JobTrait for ProvisionerJob {
/// Run a job for the parent block indicated
//
// this function is in charge of creating and executing the job's main loop
fn run<S: SubsystemSender>(
fn run(
leaf: ActivatedLeaf,
_: Self::RunArgs,
metrics: Self::Metrics,
receiver: mpsc::Receiver<ProvisionerMessage>,
mut sender: JobSender<S>,
mut sender: JobSender<Sender>,
) -> Pin<Box<dyn Future<Output = Result<(), Self::Error>> + Send>> {
let span = leaf.span.clone();
async move {
@@ -139,7 +145,10 @@ impl JobTrait for ProvisionerJob {
}
}

impl ProvisionerJob {
impl<Sender> ProvisionerJob<Sender>
where
Sender: overseer::ProvisionerSenderTrait,
{
fn new(
leaf: ActivatedLeaf,
metrics: Metrics,
@@ -153,14 +162,11 @@ impl ProvisionerJob {
metrics,
inherent_after: InherentAfter::new_from_now(),
awaiting_inherent: Vec::new(),
_phantom: std::marker::PhantomData::<Sender>::default(),
}
}

async fn run_loop(
mut self,
sender: &mut impl SubsystemSender,
span: PerLeafSpan,
) -> Result<(), Error> {
async fn run_loop(mut self, sender: &mut Sender, span: PerLeafSpan) -> Result<(), Error> {
loop {
futures::select! {
msg = self.receiver.next() => match msg {
@@ -197,7 +203,7 @@ impl ProvisionerJob {

async fn send_inherent_data(
&mut self,
sender: &mut impl SubsystemSender,
sender: &mut Sender,
return_senders: Vec<oneshot::Sender<ProvisionerInherentData>>,
) {
if let Err(err) = send_inherent_data(
@@ -275,7 +281,7 @@ async fn send_inherent_data(
bitfields: &[SignedAvailabilityBitfield],
candidates: &[CandidateReceipt],
return_senders: Vec<oneshot::Sender<ProvisionerInherentData>>,
from_job: &mut impl SubsystemSender,
from_job: &mut impl overseer::ProvisionerSenderTrait,
metrics: &Metrics,
) -> Result<(), Error> {
let availability_cores = request_availability_cores(leaf.hash, from_job)
@@ -394,7 +400,7 @@ async fn select_candidates(
bitfields: &[SignedAvailabilityBitfield],
candidates: &[CandidateReceipt],
relay_parent: Hash,
sender: &mut impl SubsystemSender,
sender: &mut impl overseer::ProvisionerSenderTrait,
) -> Result<Vec<BackedCandidate>, Error> {
let block_number = get_block_number_under_construction(relay_parent, sender).await?;

@@ -472,14 +478,11 @@ async fn select_candidates(
// now get the backed candidates corresponding to these candidate receipts
let (tx, rx) = oneshot::channel();
sender
.send_message(
CandidateBackingMessage::GetBackedCandidates(
relay_parent,
selected_candidates.clone(),
tx,
)
.into(),
)
.send_message(CandidateBackingMessage::GetBackedCandidates(
relay_parent,
selected_candidates.clone(),
tx,
))
.await;
let mut candidates = rx.await.map_err(|err| Error::CanceledBackedCandidates(err))?;

@@ -530,10 +533,10 @@ async fn select_candidates(
/// in the event of an invalid `relay_parent`, returns `Ok(0)`
async fn get_block_number_under_construction(
relay_parent: Hash,
sender: &mut impl SubsystemSender,
sender: &mut impl overseer::ProvisionerSenderTrait,
) -> Result<BlockNumber, Error> {
let (tx, rx) = oneshot::channel();
sender.send_message(ChainApiMessage::BlockNumber(relay_parent, tx).into()).await;
sender.send_message(ChainApiMessage::BlockNumber(relay_parent, tx)).await;

match rx.await.map_err(|err| Error::CanceledBlockNumber(err))? {
Ok(Some(n)) => Ok(n + 1),
@@ -591,7 +594,7 @@ enum RequestType {

/// Request open disputes identified by `CandidateHash` and the `SessionIndex`.
async fn request_disputes(
sender: &mut impl SubsystemSender,
sender: &mut impl overseer::ProvisionerSenderTrait,
active_or_recent: RequestType,
) -> Vec<(SessionIndex, CandidateHash)> {
let (tx, rx) = oneshot::channel();
@@ -600,7 +603,7 @@ async fn request_disputes(
RequestType::Active => DisputeCoordinatorMessage::ActiveDisputes(tx),
};
// Bounded by block production - `ProvisionerMessage::RequestInherentData`.
sender.send_unbounded_message(msg.into());
sender.send_unbounded_message(msg);

let recent_disputes = match rx.await {
Ok(r) => r,
@@ -614,14 +617,15 @@ async fn request_disputes(

/// Request the relevant dispute statements for a set of disputes identified by `CandidateHash` and the `SessionIndex`.
async fn request_votes(
sender: &mut impl SubsystemSender,
sender: &mut impl overseer::ProvisionerSenderTrait,
disputes_to_query: Vec<(SessionIndex, CandidateHash)>,
) -> Vec<(SessionIndex, CandidateHash, CandidateVotes)> {
let (tx, rx) = oneshot::channel();
// Bounded by block production - `ProvisionerMessage::RequestInherentData`.
sender.send_unbounded_message(
DisputeCoordinatorMessage::QueryCandidateVotes(disputes_to_query, tx).into(),
);
sender.send_unbounded_message(DisputeCoordinatorMessage::QueryCandidateVotes(
disputes_to_query,
tx,
));

match rx.await {
Ok(v) => v,
@@ -665,7 +669,7 @@ fn extend_by_random_subset_without_repetition(
const MAX_DISPUTES_FORWARDED_TO_RUNTIME: usize = 1_000;

async fn select_disputes(
sender: &mut impl SubsystemSender,
sender: &mut impl overseer::ProvisionerSenderTrait,
metrics: &metrics::Metrics,
_leaf: &ActivatedLeaf,
) -> Result<MultiDisputeStatementSet, Error> {
@@ -804,4 +808,4 @@ async fn select_disputes(
}

/// The provisioner subsystem.
pub type ProvisionerSubsystem<Spawner> = JobSubsystem<ProvisionerJob, Spawner>;
pub type ProvisionerSubsystem<Spawner, Sender> = JobSubsystem<ProvisionerJob<Sender>, Spawner>;

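
`ProvisionerJob` gains a `Sender` type parameter that only shows up in method signatures, so the struct carries a `PhantomData<Sender>` to keep the parameter used. A minimal self-contained sketch of that pattern (toy job and sender types):

use std::marker::PhantomData;

trait ProvisionerSenderTrait {
    fn send(&mut self, msg: &str);
}

// The job is generic over its sender but stores no sender value itself;
// `PhantomData` records the parameter so the definition compiles.
struct ProvisionerJob<Sender> {
    backed_candidates: Vec<String>,
    _phantom: PhantomData<Sender>,
}

impl<Sender: ProvisionerSenderTrait> ProvisionerJob<Sender> {
    fn new() -> Self {
        Self { backed_candidates: Vec::new(), _phantom: PhantomData }
    }

    // The parameter fixes, at the type level, which sender the job runs with.
    fn run_loop(&mut self, sender: &mut Sender) {
        sender.send("inherent data");
        self.backed_candidates.push("candidate".into());
    }
}

fn main() {
    struct S;
    impl ProvisionerSenderTrait for S {
        fn send(&mut self, msg: &str) { println!("{msg}") }
    }
    ProvisionerJob::<S>::new().run_loop(&mut S);
}
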
@@ -15,14 +15,17 @@
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

use crate::error::GetOnchainDisputesError;
use polkadot_node_subsystem::SubsystemSender;
use polkadot_node_subsystem::overseer;
use polkadot_primitives::v2::{CandidateHash, DisputeState, Hash, SessionIndex};
use std::collections::HashMap;

pub async fn get_onchain_disputes(
_sender: &mut impl SubsystemSender,
pub async fn get_onchain_disputes<Sender>(
_sender: &mut Sender,
_relay_parent: Hash,
) -> Result<HashMap<(SessionIndex, CandidateHash), DisputeState>, GetOnchainDisputesError> {
) -> Result<HashMap<(SessionIndex, CandidateHash), DisputeState>, GetOnchainDisputesError>
where
Sender: overseer::ProvisionerSenderTrait,
{
let _onchain = Result::<
HashMap<(SessionIndex, CandidateHash), DisputeState>,
GetOnchainDisputesError,
@@ -46,8 +49,8 @@ mod staging_impl {
};

/// Gets the on-chain disputes at a given block number and returns them as a `HashSet` so that searching in them is cheap.
pub async fn get_onchain_disputes(
sender: &mut impl SubsystemSender,
pub async fn get_onchain_disputes<Sender>(
sender: &mut Sender,
relay_parent: Hash,
) -> Result<HashMap<(SessionIndex, CandidateHash), DisputeState>, GetOnchainDisputesError> {
gum::trace!(target: LOG_TARGET, ?relay_parent, "Fetching on-chain disputes");

@@ -22,9 +22,9 @@
use futures::{channel::oneshot, future::BoxFuture, prelude::*, stream::FuturesUnordered};

use polkadot_node_subsystem::{
messages::{CandidateValidationMessage, PreCheckOutcome, PvfCheckerMessage},
overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext,
SubsystemError, SubsystemResult, SubsystemSender,
messages::{CandidateValidationMessage, PreCheckOutcome, PvfCheckerMessage, RuntimeApiMessage},
overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemError,
SubsystemResult, SubsystemSender,
};
use polkadot_primitives::v2::{
BlockNumber, Hash, PvfCheckStatement, SessionIndex, ValidationCodeHash, ValidatorId,
@@ -60,11 +60,8 @@ impl PvfCheckerSubsystem {
}
}

impl<Context> overseer::Subsystem<Context, SubsystemError> for PvfCheckerSubsystem
where
Context: SubsystemContext<Message = PvfCheckerMessage>,
Context: overseer::SubsystemContext<Message = PvfCheckerMessage>,
{
#[overseer::subsystem(PvfChecker, error=SubsystemError, prefix = self::overseer)]
impl<Context> PvfCheckerSubsystem {
fn start(self, ctx: Context) -> SpawnedSubsystem {
if self.enabled {
let future = run(ctx, self.keystore, self.metrics)
@@ -123,15 +120,12 @@ struct State {
FuturesUnordered<BoxFuture<'static, Option<(PreCheckOutcome, ValidationCodeHash)>>>,
}

#[overseer::contextbounds(PvfChecker, prefix = self::overseer)]
async fn run<Context>(
mut ctx: Context,
keystore: SyncCryptoStorePtr,
metrics: Metrics,
) -> SubsystemResult<()>
where
Context: SubsystemContext<Message = PvfCheckerMessage>,
Context: overseer::SubsystemContext<Message = PvfCheckerMessage>,
{
) -> SubsystemResult<()> {
let mut state = State {
credentials: None,
recent_block: None,
@@ -179,7 +173,7 @@ where
/// Handle an incoming PVF pre-check result from the candidate-validation subsystem.
async fn handle_pvf_check(
state: &mut State,
sender: &mut impl SubsystemSender,
sender: &mut impl overseer::PvfCheckerSenderTrait,
keystore: &SyncCryptoStorePtr,
metrics: &Metrics,
outcome: PreCheckOutcome,
@@ -247,7 +241,7 @@ struct Conclude;

async fn handle_from_overseer(
state: &mut State,
sender: &mut impl SubsystemSender,
sender: &mut impl overseer::PvfCheckerSenderTrait,
keystore: &SyncCryptoStorePtr,
metrics: &Metrics,
from_overseer: FromOverseer<PvfCheckerMessage>,
@@ -273,7 +267,7 @@ async fn handle_from_overseer(

async fn handle_leaves_update(
state: &mut State,
sender: &mut impl SubsystemSender,
sender: &mut impl overseer::PvfCheckerSenderTrait,
keystore: &SyncCryptoStorePtr,
metrics: &Metrics,
update: ActiveLeavesUpdate,
@@ -355,7 +349,7 @@ struct ActivationEffect {
/// Returns `None` if the PVF pre-checking runtime API is not supported for the given leaf hash.
async fn examine_activation(
state: &mut State,
sender: &mut impl SubsystemSender,
sender: &mut impl overseer::PvfCheckerSenderTrait,
keystore: &SyncCryptoStorePtr,
leaf_hash: Hash,
leaf_number: BlockNumber,
@@ -414,7 +408,7 @@ async fn examine_activation(
/// Checks the active validators for the given leaf. If we have a signing key for one of them,
/// returns the [`SigningCredentials`].
async fn check_signing_credentials(
sender: &mut impl SubsystemSender,
sender: &mut impl SubsystemSender<RuntimeApiMessage>,
keystore: &SyncCryptoStorePtr,
leaf: Hash,
) -> Option<SigningCredentials> {
@@ -443,7 +437,7 @@ async fn check_signing_credentials(
///
/// If the validator already voted for the given code, this function does nothing.
async fn sign_and_submit_pvf_check_statement(
sender: &mut impl SubsystemSender,
sender: &mut impl overseer::PvfCheckerSenderTrait,
keystore: &SyncCryptoStorePtr,
voted: &mut HashSet<ValidationCodeHash>,
credentials: &SigningCredentials,
@@ -535,7 +529,7 @@ async fn sign_and_submit_pvf_check_statement(
/// into the `currently_checking` set.
async fn initiate_precheck(
state: &mut State,
sender: &mut impl SubsystemSender,
sender: &mut impl overseer::PvfCheckerSenderTrait,
relay_parent: Hash,
validation_code_hash: ValidationCodeHash,
metrics: &Metrics,
@@ -544,9 +538,7 @@ async fn initiate_precheck(

let (tx, rx) = oneshot::channel();
sender
.send_message(
CandidateValidationMessage::PreCheck(relay_parent, validation_code_hash, tx).into(),
)
.send_message(CandidateValidationMessage::PreCheck(relay_parent, validation_code_hash, tx))
.await;

let timer = metrics.time_pre_check_judgement();

@@ -26,7 +26,7 @@ use polkadot_primitives::v2::{
};

pub(crate) async fn session_index_for_child(
sender: &mut impl SubsystemSender,
sender: &mut impl SubsystemSender<RuntimeApiMessage>,
relay_parent: Hash,
) -> Result<SessionIndex, RuntimeRequestError> {
let (tx, rx) = oneshot::channel();
@@ -34,7 +34,7 @@ pub(crate) async fn session_index_for_child(
}

pub(crate) async fn validators(
sender: &mut impl SubsystemSender,
sender: &mut impl SubsystemSender<RuntimeApiMessage>,
relay_parent: Hash,
) -> Result<Vec<ValidatorId>, RuntimeRequestError> {
let (tx, rx) = oneshot::channel();
@@ -42,7 +42,7 @@ pub(crate) async fn validators(
}

pub(crate) async fn submit_pvf_check_statement(
sender: &mut impl SubsystemSender,
sender: &mut impl SubsystemSender<RuntimeApiMessage>,
relay_parent: Hash,
stmt: PvfCheckStatement,
signature: ValidatorSignature,
@@ -58,7 +58,7 @@ pub(crate) async fn submit_pvf_check_statement(
}

pub(crate) async fn pvfs_require_precheck(
sender: &mut impl SubsystemSender,
sender: &mut impl SubsystemSender<RuntimeApiMessage>,
relay_parent: Hash,
) -> Result<Vec<ValidationCodeHash>, RuntimeRequestError> {
let (tx, rx) = oneshot::channel();
@@ -73,7 +73,7 @@ pub(crate) enum RuntimeRequestError {
}

pub(crate) async fn runtime_api_request<T>(
sender: &mut impl SubsystemSender,
sender: &mut impl SubsystemSender<RuntimeApiMessage>,
relay_parent: Hash,
request: RuntimeApiRequest,
receiver: oneshot::Receiver<Result<T, RuntimeApiSubsystemError>>,

@@ -25,8 +25,7 @@
use polkadot_node_subsystem::{
errors::RuntimeApiError,
messages::{RuntimeApiMessage, RuntimeApiRequest as Request},
overseer, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext, SubsystemError,
SubsystemResult,
overseer, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemError, SubsystemResult,
};
use polkadot_primitives::{
runtime_api::ParachainHost,
@@ -92,12 +91,11 @@ impl<Client> RuntimeApiSubsystem<Client> {
}
}

impl<Client, Context> overseer::Subsystem<Context, SubsystemError> for RuntimeApiSubsystem<Client>
#[overseer::subsystem(RuntimeApi, error = SubsystemError, prefix = self::overseer)]
impl<Client, Context> RuntimeApiSubsystem<Client>
where
Client: ProvideRuntimeApi<Block> + Send + 'static + Sync,
Client::Api: ParachainHost<Block> + BabeApi<Block> + AuthorityDiscoveryApi<Block>,
Context: SubsystemContext<Message = RuntimeApiMessage>,
Context: overseer::SubsystemContext<Message = RuntimeApiMessage>,
{
fn start(self, ctx: Context) -> SpawnedSubsystem {
SpawnedSubsystem { future: run(ctx, self).boxed(), name: "runtime-api-subsystem" }
@@ -333,6 +331,7 @@ where
}
}

#[overseer::contextbounds(RuntimeApi, prefix = self::overseer)]
async fn run<Client, Context>(
mut ctx: Context,
mut subsystem: RuntimeApiSubsystem<Client>,
@@ -340,8 +339,6 @@ async fn run<Client, Context>(
where
Client: ProvideRuntimeApi<Block> + Send + Sync + 'static,
Client::Api: ParachainHost<Block> + BabeApi<Block> + AuthorityDiscoveryApi<Block>,
Context: SubsystemContext<Message = RuntimeApiMessage>,
Context: overseer::SubsystemContext<Message = RuntimeApiMessage>,
{
loop {
select! {

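
The test-helper rework below leans on `AssociateOutgoing`: each incoming message type is tied to an enum of exactly the messages that subsystem may send. A minimal self-contained model (toy types; the real enums like `CandidateBackingOutgoingMessages` are generated):

// Ties a subsystem's incoming message type to its declared outgoing set.
trait AssociateOutgoing {
    type OutgoingMessages;
}

// Toy incoming message and its generated outgoing enum.
struct CandidateBackingMessage;

enum CandidateBackingOutgoingMessages {
    CollatorProtocolMessage(&'static str),
    RuntimeApiMessage(&'static str),
}

impl AssociateOutgoing for CandidateBackingMessage {
    type OutgoingMessages = CandidateBackingOutgoingMessages;
}

// Generic code can now name "whatever this subsystem is allowed to send"
// without mentioning the global `AllMessages` enum.
fn intercept_outgoing<M: AssociateOutgoing>(
    msg: M::OutgoingMessages,
) -> Option<M::OutgoingMessages> {
    Some(msg)
}

fn main() {
    let msg = CandidateBackingOutgoingMessages::CollatorProtocolMessage("Seconded");
    let _ = intercept_outgoing::<CandidateBackingMessage>(msg);
}
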
@@ -21,16 +21,18 @@
//! messages on the overseer level.

use polkadot_node_subsystem::*;
pub use polkadot_node_subsystem::{messages::AllMessages, overseer, FromOverseer};
pub use polkadot_node_subsystem::{messages, messages::*, overseer, FromOverseer};
use std::{future::Future, pin::Pin};

/// Filter incoming and outgoing messages.
pub trait MessageInterceptor<Sender>: Send + Sync + Clone + 'static
where
Sender: overseer::SubsystemSender<Self::Message> + Clone + 'static,
Sender: overseer::SubsystemSender<<Self::Message as overseer::AssociateOutgoing>::OutgoingMessages>
+ Clone
+ 'static,
{
/// The message type the original subsystem handles incoming.
type Message: Send + 'static;
type Message: overseer::AssociateOutgoing + Send + 'static;

/// Filter messages that are to be received by
/// the subsystem.
@@ -46,7 +48,10 @@ where
}

/// Modify outgoing messages.
fn intercept_outgoing(&self, msg: AllMessages) -> Option<AllMessages> {
fn intercept_outgoing(
&self,
msg: <Self::Message as overseer::AssociateOutgoing>::OutgoingMessages,
) -> Option<<Self::Message as overseer::AssociateOutgoing>::OutgoingMessages> {
Some(msg)
}
}
@@ -59,13 +64,26 @@ pub struct InterceptedSender<Sender, Fil> {
}

#[async_trait::async_trait]
impl<Sender, Fil> overseer::SubsystemSender<AllMessages> for InterceptedSender<Sender, Fil>
impl<OutgoingMessage, Sender, Fil> overseer::SubsystemSender<OutgoingMessage> for InterceptedSender<Sender, Fil>
where
Sender: overseer::SubsystemSender<AllMessages>
+ overseer::SubsystemSender<<Fil as MessageInterceptor<Sender>>::Message>,
OutgoingMessage: overseer::AssociateOutgoing + Send + 'static,
Sender: overseer::SubsystemSender<OutgoingMessage>
+ overseer::SubsystemSender<
<
<Fil as MessageInterceptor<Sender>>::Message as overseer::AssociateOutgoing
>::OutgoingMessages
>,
Fil: MessageInterceptor<Sender>,
<Fil as MessageInterceptor<Sender>>::Message: overseer::AssociateOutgoing,
<
<Fil as MessageInterceptor<Sender>>::Message as overseer::AssociateOutgoing
>::OutgoingMessages:
From<OutgoingMessage>,
{
async fn send_message(&mut self, msg: AllMessages) {
async fn send_message(&mut self, msg: OutgoingMessage) {
let msg = <
<<Fil as MessageInterceptor<Sender>>::Message as overseer::AssociateOutgoing
>::OutgoingMessages as From<OutgoingMessage>>::from(msg);
if let Some(msg) = self.message_filter.intercept_outgoing(msg) {
self.inner.send_message(msg).await;
}
@@ -73,7 +91,7 @@ where

async fn send_messages<T>(&mut self, msgs: T)
where
T: IntoIterator<Item = AllMessages> + Send,
T: IntoIterator<Item = OutgoingMessage> + Send,
T::IntoIter: Send,
{
for msg in msgs {
@@ -81,7 +99,10 @@ where
}
}

fn send_unbounded_message(&mut self, msg: AllMessages) {
fn send_unbounded_message(&mut self, msg: OutgoingMessage) {
let msg = <
<<Fil as MessageInterceptor<Sender>>::Message as overseer::AssociateOutgoing
>::OutgoingMessages as From<OutgoingMessage>>::from(msg);
if let Some(msg) = self.message_filter.intercept_outgoing(msg) {
self.inner.send_unbounded_message(msg);
}
@@ -91,11 +112,16 @@ where
/// A subsystem context, that filters the outgoing messages.
pub struct InterceptedContext<Context, Fil>
where
Context: overseer::SubsystemContext + SubsystemContext,
Context: overseer::SubsystemContext<Error=SubsystemError, Signal=OverseerSignal>,
Fil: MessageInterceptor<<Context as overseer::SubsystemContext>::Sender>,
<Context as overseer::SubsystemContext>::Sender: overseer::SubsystemSender<
<Fil as MessageInterceptor<<Context as overseer::SubsystemContext>::Sender>>::Message,
>,
<Context as overseer::SubsystemContext>::Sender:
overseer::SubsystemSender<
<
<
Fil as MessageInterceptor<<Context as overseer::SubsystemContext>::Sender>
>::Message as overseer::AssociateOutgoing
>::OutgoingMessages,
>,
{
inner: Context,
message_filter: Fil,
@@ -104,14 +130,15 @@ where

impl<Context, Fil> InterceptedContext<Context, Fil>
where
Context: overseer::SubsystemContext + SubsystemContext,
Context: overseer::SubsystemContext<Error=SubsystemError,Signal=OverseerSignal>,
Fil: MessageInterceptor<
<Context as overseer::SubsystemContext>::Sender,
Message = <Context as overseer::SubsystemContext>::Message,
>,
<Context as overseer::SubsystemContext>::Message: overseer::AssociateOutgoing,
<Context as overseer::SubsystemContext>::Sender: overseer::SubsystemSender<
<Fil as MessageInterceptor<<Context as overseer::SubsystemContext>::Sender>>::Message,
>,
<<Context as overseer::SubsystemContext>::Message as overseer::AssociateOutgoing>::OutgoingMessages
>
{
pub fn new(mut inner: Context, message_filter: Fil) -> Self {
let sender = InterceptedSender::<<Context as overseer::SubsystemContext>::Sender, Fil> {
@@ -125,22 +152,27 @@ where
#[async_trait::async_trait]
impl<Context, Fil> overseer::SubsystemContext for InterceptedContext<Context, Fil>
where
Context: overseer::SubsystemContext + SubsystemContext,
Context: overseer::SubsystemContext<Error=SubsystemError,Signal=OverseerSignal>,
<Context as overseer::SubsystemContext>::Message:
overseer::AssociateOutgoing,
<Context as overseer::SubsystemContext>::Sender:
overseer::SubsystemSender<
<<Context as overseer::SubsystemContext>::Message as overseer::AssociateOutgoing>::OutgoingMessages
>,
InterceptedSender<<Context as overseer::SubsystemContext>::Sender, Fil>:
overseer::SubsystemSender<
<<Context as overseer::SubsystemContext>::Message as overseer::AssociateOutgoing>::OutgoingMessages
>,
Fil: MessageInterceptor<
<Context as overseer::SubsystemContext>::Sender,
Message = <Context as overseer::SubsystemContext>::Message,
>,
<Context as overseer::SubsystemContext>::AllMessages:
From<<Context as overseer::SubsystemContext>::Message>,
<Context as overseer::SubsystemContext>::Sender: overseer::SubsystemSender<
<Fil as MessageInterceptor<<Context as overseer::SubsystemContext>::Sender>>::Message,
>,
{
type Message = <Context as overseer::SubsystemContext>::Message;
type Sender = InterceptedSender<<Context as overseer::SubsystemContext>::Sender, Fil>;
type Error = <Context as overseer::SubsystemContext>::Error;
type AllMessages = <Context as overseer::SubsystemContext>::AllMessages;
type Signal = <Context as overseer::SubsystemContext>::Signal;
type Error = SubsystemError;
type OutgoingMessages = <<Context as overseer::SubsystemContext>::Message as overseer::AssociateOutgoing>::OutgoingMessages;
type Signal = OverseerSignal;

async fn try_recv(&mut self) -> Result<Option<FromOverseer<Self::Message>>, ()> {
loop {
@@ -200,16 +232,28 @@ impl<Sub, Interceptor> InterceptedSubsystem<Sub, Interceptor> {

impl<Context, Sub, Interceptor> overseer::Subsystem<Context, SubsystemError> for InterceptedSubsystem<Sub, Interceptor>
where
Context: overseer::SubsystemContext + SubsystemContext + Sync + Send,
Sub: overseer::Subsystem<InterceptedContext<Context, Interceptor>, SubsystemError>,
InterceptedContext<Context, Interceptor>: overseer::SubsystemContext + SubsystemContext,
Interceptor: MessageInterceptor<
<Context as overseer::SubsystemContext>::Sender,
Message = <Context as overseer::SubsystemContext>::Message,
>,
<Context as overseer::SubsystemContext>::Sender: overseer::SubsystemSender<
<Interceptor as MessageInterceptor<<Context as overseer::SubsystemContext>::Sender>>::Message,
>,
Context:
overseer::SubsystemContext<Error=SubsystemError,Signal=OverseerSignal> + Sync + Send,
InterceptedContext<Context, Interceptor>:
overseer::SubsystemContext<Error=SubsystemError,Signal=OverseerSignal>,
Sub:
overseer::Subsystem<InterceptedContext<Context, Interceptor>, SubsystemError>,
Interceptor:
MessageInterceptor<
<Context as overseer::SubsystemContext>::Sender,
Message = <Context as overseer::SubsystemContext>::Message,
>,
// <Context as overseer::SubsystemContext>::Sender:
// overseer::SubsystemSender<
// <Interceptor as MessageInterceptor<<Context as overseer::SubsystemContext>::Sender>>::Message,
// >,
<Context as overseer::SubsystemContext>::Message:
overseer::AssociateOutgoing,
<Context as overseer::SubsystemContext>::Sender:
overseer::SubsystemSender<
<<Context as overseer::SubsystemContext>::Message as overseer::AssociateOutgoing
>::OutgoingMessages
>,
{
fn start(self, ctx: Context) -> SpawnedSubsystem {
let ctx = InterceptedContext::new(ctx, self.message_interceptor);

@@ -19,8 +19,8 @@ use super::*;
use polkadot_node_subsystem_test_helpers::*;

use polkadot_node_subsystem::{
messages::{AllMessages, AvailabilityStoreMessage},
overseer::{dummy::DummySubsystem, gen::TimeoutExt, Subsystem},
messages::AvailabilityStoreMessage,
overseer::{dummy::DummySubsystem, gen::TimeoutExt, Subsystem, AssociateOutgoing},
SubsystemError,
};

@@ -29,8 +29,7 @@ struct BlackHoleInterceptor;

impl<Sender> MessageInterceptor<Sender> for BlackHoleInterceptor
where
Sender: overseer::SubsystemSender<AllMessages>
+ overseer::SubsystemSender<AvailabilityStoreMessage>
Sender: overseer::AvailabilityStoreSenderTrait
+ Clone
+ 'static,
{
@@ -53,8 +52,7 @@ struct PassInterceptor;

impl<Sender> MessageInterceptor<Sender> for PassInterceptor
where
Sender: overseer::SubsystemSender<AllMessages>
+ overseer::SubsystemSender<AvailabilityStoreMessage>
Sender: overseer::AvailabilityStoreSenderTrait
+ Clone
+ 'static,
{
@@ -68,8 +66,8 @@ async fn overseer_send<T: Into<AllMessages>>(overseer: &mut TestSubsystemContext
fn launch_harness<F, M, Sub, G>(test_gen: G)
where
F: Future<Output = TestSubsystemContextHandle<M>> + Send,
M: Into<AllMessages> + std::fmt::Debug + Send + 'static,
AllMessages: From<M>,
M: AssociateOutgoing + std::fmt::Debug + Send + 'static,
// <M as AssociateOutgoing>::OutgoingMessages: From<M>,
Sub: Subsystem<TestSubsystemContext<M, sp_core::testing::TaskExecutor>, SubsystemError>,
G: Fn(TestSubsystemContextHandle<M>) -> (F, Sub),
{

@@ -131,11 +131,7 @@ where
subsystem_sender: Sender,
response_sender: oneshot::Sender<Result<ValidationResult, ValidationFailed>>,
) where
Sender: overseer::SubsystemSender<AllMessages>
+ overseer::SubsystemSender<CandidateValidationMessage>
+ Clone
+ Send
+ 'static,
Sender: overseer::CandidateValidationSenderTrait + Clone + Send + 'static,
{
let _candidate_descriptor = candidate_descriptor.clone();
let mut subsystem_sender = subsystem_sender.clone();
@@ -200,11 +196,7 @@ fn create_validation_response(

impl<Sender, Spawner> MessageInterceptor<Sender> for ReplaceValidationResult<Spawner>
where
Sender: overseer::SubsystemSender<CandidateValidationMessage>
+ overseer::SubsystemSender<AllMessages>
+ Clone
+ Send
+ 'static,
Sender: overseer::CandidateValidationSenderTrait + Clone + Send + 'static,
Spawner: SpawnNamed + Clone + 'static,
{
type Message = CandidateValidationMessage;
@@ -336,7 +328,10 @@ where
}
}

fn intercept_outgoing(&self, msg: AllMessages) -> Option<AllMessages> {
fn intercept_outgoing(
&self,
msg: overseer::CandidateValidationOutgoingMessages,
) -> Option<overseer::CandidateValidationOutgoingMessages> {
Some(msg)
}
}

@@ -71,11 +71,7 @@ struct NoteCandidate<Spawner> {
|
||||
|
||||
impl<Sender, Spawner> MessageInterceptor<Sender> for NoteCandidate<Spawner>
|
||||
where
|
||||
Sender: overseer::SubsystemSender<AllMessages>
|
||||
+ overseer::SubsystemSender<CandidateBackingMessage>
|
||||
+ Clone
|
||||
+ Send
|
||||
+ 'static,
|
||||
Sender: overseer::CandidateBackingSenderTrait + Clone + Send + 'static,
|
||||
Spawner: SpawnNamed + Clone + 'static,
|
||||
{
|
||||
type Message = CandidateBackingMessage;
|
||||
@@ -219,20 +215,21 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
fn intercept_outgoing(&self, msg: AllMessages) -> Option<AllMessages> {
|
||||
fn intercept_outgoing(
|
||||
&self,
|
||||
msg: overseer::CandidateBackingOutgoingMessages,
|
||||
) -> Option<overseer::CandidateBackingOutgoingMessages> {
|
||||
let msg = match msg {
|
||||
AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(
|
||||
relay_parent,
|
||||
statement,
|
||||
)) => {
|
||||
overseer::CandidateBackingOutgoingMessages::CollatorProtocolMessage(
|
||||
CollatorProtocolMessage::Seconded(relay_parent, statement),
|
||||
) => {
|
||||
// `parachain::collator-protocol: received an unexpected `CollationSeconded`: unknown statement statement=...`
// TODO: Fix this error. We get this on collators because `malicious backing` creates a candidate that gets backed/included.
// It is harmless for test parachain collators, but it will prevent cumulus-based collators from making progress
// as they wait for the relay chain to confirm the seconding of the collation.
AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(
relay_parent,
statement,
))
overseer::CandidateBackingOutgoingMessages::CollatorProtocolMessage(
CollatorProtocolMessage::Seconded(relay_parent, statement),
)
},
msg => msg,
};

@@ -41,7 +41,7 @@ pub fn logger_hook() -> impl FnOnce(&mut sc_cli::LoggerBuilder, &sc_service::Con
|_logger_builder, _config| {}
}

/// This module reexports Prometheus types and defines the [`Metrics`] trait.
/// This module reexports Prometheus types and defines the [`Metrics`](metrics::Metrics) trait.
pub mod metrics {
/// Reexport Substrate Prometheus types.
pub use substrate_prometheus_endpoint as prometheus;

@@ -34,8 +34,7 @@ use polkadot_node_subsystem::{
ApprovalCheckResult, ApprovalDistributionMessage, ApprovalVotingMessage,
AssignmentCheckResult, NetworkBridgeEvent, NetworkBridgeMessage,
},
overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext,
SubsystemError,
overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemError,
};
use polkadot_primitives::v2::{
BlockNumber, CandidateIndex, Hash, SessionIndex, ValidatorIndex, ValidatorSignature,
@@ -321,11 +320,11 @@ enum PendingMessage {
Approval(IndirectSignedApprovalVote),
}

#[overseer::contextbounds(ApprovalDistribution, prefix = self::overseer)]
impl State {
async fn handle_network_msg(
async fn handle_network_msg<Context>(
&mut self,
ctx: &mut (impl SubsystemContext<Message = ApprovalDistributionMessage>
+ overseer::SubsystemContext<Message = ApprovalDistributionMessage>),
ctx: &mut Context,
metrics: &Metrics,
event: NetworkBridgeEvent<net_protocol::ApprovalDistributionMessage>,
rng: &mut (impl CryptoRng + Rng),
@@ -377,10 +376,9 @@ impl State {
}
}

async fn handle_new_blocks(
async fn handle_new_blocks<Context>(
&mut self,
ctx: &mut (impl SubsystemContext<Message = ApprovalDistributionMessage>
+ overseer::SubsystemContext<Message = ApprovalDistributionMessage>),
ctx: &mut Context,
metrics: &Metrics,
metas: Vec<BlockApprovalMeta>,
rng: &mut (impl CryptoRng + Rng),
@@ -421,11 +419,12 @@ impl State {
);

{
let sender = ctx.sender();
for (peer_id, view) in self.peer_views.iter() {
let intersection = view.iter().filter(|h| new_hashes.contains(h));
let view_intersection = View::new(intersection.cloned(), view.finalized_number);
Self::unify_with_peer(
ctx,
sender,
metrics,
&mut self.blocks,
&self.topologies,
@@ -496,10 +495,9 @@ impl State {
self.enable_aggression(ctx, Resend::Yes, metrics).await;
}

async fn handle_new_session_topology(
async fn handle_new_session_topology<Context>(
&mut self,
ctx: &mut (impl SubsystemContext<Message = ApprovalDistributionMessage>
+ overseer::SubsystemContext<Message = ApprovalDistributionMessage>),
ctx: &mut Context,
session: SessionIndex,
topology: SessionGridTopology,
) {
@@ -520,15 +518,16 @@ impl State {
.await;
}

async fn process_incoming_peer_message(
async fn process_incoming_peer_message<Context, R>(
&mut self,
ctx: &mut (impl SubsystemContext<Message = ApprovalDistributionMessage>
+ overseer::SubsystemContext<Message = ApprovalDistributionMessage>),
ctx: &mut Context,
metrics: &Metrics,
peer_id: PeerId,
msg: protocol_v1::ApprovalDistributionMessage,
rng: &mut (impl CryptoRng + Rng),
) {
rng: &mut R,
) where
R: CryptoRng + Rng,
{
match msg {
protocol_v1::ApprovalDistributionMessage::Assignments(assignments) => {
gum::trace!(
@@ -612,15 +611,16 @@ impl State {

// handle a peer view change: requires that the peer is already connected
// and has an entry in the `PeerData` struct.
async fn handle_peer_view_change(
async fn handle_peer_view_change<Context, R>(
&mut self,
ctx: &mut (impl SubsystemContext<Message = ApprovalDistributionMessage>
+ overseer::SubsystemContext<Message = ApprovalDistributionMessage>),
ctx: &mut Context,
metrics: &Metrics,
peer_id: PeerId,
view: View,
rng: &mut (impl CryptoRng + Rng),
) {
rng: &mut R,
) where
R: CryptoRng + Rng,
{
gum::trace!(target: LOG_TARGET, ?view, "Peer view change");
let finalized_number = view.finalized_number;
let old_view =
@@ -646,7 +646,7 @@ impl State {
}

Self::unify_with_peer(
ctx,
ctx.sender(),
metrics,
&mut self.blocks,
&self.topologies,
@@ -658,10 +658,9 @@ impl State {
.await;
}

async fn handle_block_finalized(
async fn handle_block_finalized<Context>(
&mut self,
ctx: &mut (impl SubsystemContext<Message = ApprovalDistributionMessage>
+ overseer::SubsystemContext<Message = ApprovalDistributionMessage>),
ctx: &mut Context,
metrics: &Metrics,
finalized_number: BlockNumber,
) {
@@ -687,16 +686,17 @@ impl State {
self.enable_aggression(ctx, Resend::No, metrics).await;
}

async fn import_and_circulate_assignment(
async fn import_and_circulate_assignment<Context, R>(
&mut self,
ctx: &mut (impl SubsystemContext<Message = ApprovalDistributionMessage>
+ overseer::SubsystemContext<Message = ApprovalDistributionMessage>),
ctx: &mut Context,
metrics: &Metrics,
source: MessageSource,
assignment: IndirectAssignmentCert,
claimed_candidate_index: CandidateIndex,
rng: &mut (impl CryptoRng + Rng),
) {
rng: &mut R,
) where
R: CryptoRng + Rng,
{
let block_hash = assignment.block_hash.clone();
let validator_index = assignment.validator;

@@ -712,7 +712,7 @@ impl State {
"Unexpected assignment",
);
if !self.recent_outdated_blocks.is_recent_outdated(&block_hash) {
modify_reputation(ctx, peer_id, COST_UNEXPECTED_MESSAGE).await;
modify_reputation(ctx.sender(), peer_id, COST_UNEXPECTED_MESSAGE).await;
}
}
return
@@ -737,7 +737,7 @@ impl State {
?message_subject,
"Duplicate assignment",
);
modify_reputation(ctx, peer_id, COST_DUPLICATE_MESSAGE).await;
modify_reputation(ctx.sender(), peer_id, COST_DUPLICATE_MESSAGE).await;
}
return
}
@@ -749,13 +749,13 @@ impl State {
?message_subject,
"Assignment from a peer is out of view",
);
modify_reputation(ctx, peer_id.clone(), COST_UNEXPECTED_MESSAGE).await;
modify_reputation(ctx.sender(), peer_id.clone(), COST_UNEXPECTED_MESSAGE).await;
},
}

// if the assignment is known to be valid, reward the peer
if entry.knowledge.contains(&message_subject, message_kind) {
modify_reputation(ctx, peer_id.clone(), BENEFIT_VALID_MESSAGE).await;
modify_reputation(ctx.sender(), peer_id.clone(), BENEFIT_VALID_MESSAGE).await;
if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) {
gum::trace!(target: LOG_TARGET, ?peer_id, ?message_subject, "Known assignment");
peer_knowledge.received.insert(message_subject, message_kind);
@@ -791,7 +791,8 @@ impl State {
);
match result {
AssignmentCheckResult::Accepted => {
modify_reputation(ctx, peer_id.clone(), BENEFIT_VALID_MESSAGE_FIRST).await;
modify_reputation(ctx.sender(), peer_id.clone(), BENEFIT_VALID_MESSAGE_FIRST)
.await;
entry.knowledge.known_messages.insert(message_subject.clone(), message_kind);
if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) {
peer_knowledge.received.insert(message_subject.clone(), message_kind);
@@ -819,7 +820,8 @@ impl State {
?peer_id,
"Got an assignment too far in the future",
);
modify_reputation(ctx, peer_id, COST_ASSIGNMENT_TOO_FAR_IN_THE_FUTURE).await;
modify_reputation(ctx.sender(), peer_id, COST_ASSIGNMENT_TOO_FAR_IN_THE_FUTURE)
.await;
return
},
AssignmentCheckResult::Bad(error) => {
@@ -830,7 +832,7 @@ impl State {
%error,
"Got a bad assignment from peer",
);
modify_reputation(ctx, peer_id, COST_INVALID_MESSAGE).await;
modify_reputation(ctx.sender(), peer_id, COST_INVALID_MESSAGE).await;
return
},
}
@@ -946,10 +948,9 @@ impl State {
}
}

async fn import_and_circulate_approval(
async fn import_and_circulate_approval<Context>(
&mut self,
ctx: &mut (impl SubsystemContext<Message = ApprovalDistributionMessage>
+ overseer::SubsystemContext<Message = ApprovalDistributionMessage>),
ctx: &mut Context,
metrics: &Metrics,
source: MessageSource,
vote: IndirectSignedApprovalVote,
@@ -963,7 +964,7 @@ impl State {
_ => {
if let Some(peer_id) = source.peer_id() {
if !self.recent_outdated_blocks.is_recent_outdated(&block_hash) {
modify_reputation(ctx, peer_id, COST_UNEXPECTED_MESSAGE).await;
modify_reputation(ctx.sender(), peer_id, COST_UNEXPECTED_MESSAGE).await;
}
}
return
@@ -982,7 +983,7 @@ impl State {
?message_subject,
"Unknown approval assignment",
);
modify_reputation(ctx, peer_id, COST_UNEXPECTED_MESSAGE).await;
modify_reputation(ctx.sender(), peer_id, COST_UNEXPECTED_MESSAGE).await;
return
}

@@ -999,7 +1000,7 @@ impl State {
"Duplicate approval",
);

modify_reputation(ctx, peer_id, COST_DUPLICATE_MESSAGE).await;
modify_reputation(ctx.sender(), peer_id, COST_DUPLICATE_MESSAGE).await;
}
return
}
@@ -1011,14 +1012,14 @@ impl State {
?message_subject,
"Approval from a peer is out of view",
);
modify_reputation(ctx, peer_id.clone(), COST_UNEXPECTED_MESSAGE).await;
modify_reputation(ctx.sender(), peer_id.clone(), COST_UNEXPECTED_MESSAGE).await;
},
}

// if the approval is known to be valid, reward the peer
if entry.knowledge.contains(&message_subject, message_kind) {
gum::trace!(target: LOG_TARGET, ?peer_id, ?message_subject, "Known approval");
modify_reputation(ctx, peer_id.clone(), BENEFIT_VALID_MESSAGE).await;
modify_reputation(ctx.sender(), peer_id.clone(), BENEFIT_VALID_MESSAGE).await;
if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) {
peer_knowledge.received.insert(message_subject.clone(), message_kind);
}
@@ -1049,7 +1050,8 @@ impl State {
);
match result {
ApprovalCheckResult::Accepted => {
modify_reputation(ctx, peer_id.clone(), BENEFIT_VALID_MESSAGE_FIRST).await;
modify_reputation(ctx.sender(), peer_id.clone(), BENEFIT_VALID_MESSAGE_FIRST)
.await;

entry.knowledge.insert(message_subject.clone(), message_kind);
if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) {
@@ -1057,7 +1059,7 @@ impl State {
}
},
ApprovalCheckResult::Bad(error) => {
modify_reputation(ctx, peer_id, COST_INVALID_MESSAGE).await;
modify_reputation(ctx.sender(), peer_id, COST_INVALID_MESSAGE).await;
gum::info!(
target: LOG_TARGET,
?peer_id,
@@ -1209,8 +1211,7 @@ impl State {
}

async fn unify_with_peer(
ctx: &mut (impl SubsystemContext<Message = ApprovalDistributionMessage>
+ overseer::SubsystemContext<Message = ApprovalDistributionMessage>),
sender: &mut impl overseer::ApprovalDistributionSenderTrait,
metrics: &Metrics,
entries: &mut HashMap<Hash, BlockEntry>,
topologies: &SessionGridTopologies,
@@ -1326,13 +1327,14 @@ impl State {
"Sending assignments to unified peer",
);

ctx.send_message(NetworkBridgeMessage::SendValidationMessage(
vec![peer_id.clone()],
Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution(
protocol_v1::ApprovalDistributionMessage::Assignments(assignments_to_send),
)),
))
.await;
sender
.send_message(NetworkBridgeMessage::SendValidationMessage(
vec![peer_id.clone()],
Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution(
protocol_v1::ApprovalDistributionMessage::Assignments(assignments_to_send),
)),
))
.await;
}

if !approvals_to_send.is_empty() {
@@ -1343,20 +1345,20 @@ impl State {
"Sending approvals to unified peer",
);

ctx.send_message(NetworkBridgeMessage::SendValidationMessage(
vec![peer_id],
Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution(
protocol_v1::ApprovalDistributionMessage::Approvals(approvals_to_send),
)),
))
.await;
sender
.send_message(NetworkBridgeMessage::SendValidationMessage(
vec![peer_id],
Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution(
protocol_v1::ApprovalDistributionMessage::Approvals(approvals_to_send),
)),
))
.await;
}
}

async fn enable_aggression(
async fn enable_aggression<Context>(
&mut self,
ctx: &mut (impl SubsystemContext<Message = ApprovalDistributionMessage>
+ overseer::SubsystemContext<Message = ApprovalDistributionMessage>),
ctx: &mut Context,
resend: Resend,
metrics: &Metrics,
) {
@@ -1457,14 +1459,17 @@ impl State {
//
// Note that the required routing of a message can be modified even if the
// topology is unknown yet.
async fn adjust_required_routing_and_propagate(
ctx: &mut (impl SubsystemContext<Message = ApprovalDistributionMessage>
+ overseer::SubsystemContext<Message = ApprovalDistributionMessage>),
#[overseer::contextbounds(ApprovalDistribution, prefix = self::overseer)]
async fn adjust_required_routing_and_propagate<Context, BlockFilter, RoutingModifier>(
ctx: &mut Context,
blocks: &mut HashMap<Hash, BlockEntry>,
topologies: &SessionGridTopologies,
block_filter: impl Fn(&mut BlockEntry) -> bool,
routing_modifier: impl Fn(&mut RequiredRouting, bool, &ValidatorIndex),
) {
block_filter: BlockFilter,
routing_modifier: RoutingModifier,
) where
BlockFilter: Fn(&mut BlockEntry) -> bool,
RoutingModifier: Fn(&mut RequiredRouting, bool, &ValidatorIndex),
{
let mut peer_assignments = HashMap::new();
let mut peer_approvals = HashMap::new();

@@ -1566,8 +1571,7 @@ async fn adjust_required_routing_and_propagate(

/// Modify the reputation of a peer based on its behavior.
async fn modify_reputation(
ctx: &mut (impl SubsystemContext<Message = ApprovalDistributionMessage>
+ overseer::SubsystemContext<Message = ApprovalDistributionMessage>),
sender: &mut impl overseer::ApprovalDistributionSenderTrait,
peer_id: PeerId,
rep: Rep,
) {
@@ -1578,20 +1582,17 @@ async fn modify_reputation(
"Reputation change for peer",
);

ctx.send_message(NetworkBridgeMessage::ReportPeer(peer_id, rep)).await;
sender.send_message(NetworkBridgeMessage::ReportPeer(peer_id, rep)).await;
}

#[overseer::contextbounds(ApprovalDistribution, prefix = self::overseer)]
impl ApprovalDistribution {
/// Create a new instance of the [`ApprovalDistribution`] subsystem.
pub fn new(metrics: Metrics) -> Self {
Self { metrics }
}

async fn run<Context>(self, ctx: Context)
where
Context: SubsystemContext<Message = ApprovalDistributionMessage>,
Context: overseer::SubsystemContext<Message = ApprovalDistributionMessage>,
{
async fn run<Context>(self, ctx: Context) {
let mut state = State::default();

// According to the docs of `rand`, this is a ChaCha12 RNG in practice
@@ -1606,10 +1607,7 @@ impl ApprovalDistribution {
mut ctx: Context,
state: &mut State,
rng: &mut (impl CryptoRng + Rng),
) where
Context: SubsystemContext<Message = ApprovalDistributionMessage>,
Context: overseer::SubsystemContext<Message = ApprovalDistributionMessage>,
{
) {
loop {
let message = match ctx.recv().await {
Ok(message) => message,
@@ -1644,10 +1642,7 @@ impl ApprovalDistribution {
msg: ApprovalDistributionMessage,
metrics: &Metrics,
rng: &mut (impl CryptoRng + Rng),
) where
Context: SubsystemContext<Message = ApprovalDistributionMessage>,
Context: overseer::SubsystemContext<Message = ApprovalDistributionMessage>,
{
) {
match msg {
ApprovalDistributionMessage::NetworkBridgeUpdate(event) => {
state.handle_network_msg(ctx, metrics, event, rng).await;
@@ -1690,11 +1685,8 @@ impl ApprovalDistribution {
}
}

impl<Context> overseer::Subsystem<Context, SubsystemError> for ApprovalDistribution
where
Context: SubsystemContext<Message = ApprovalDistributionMessage>,
Context: overseer::SubsystemContext<Message = ApprovalDistributionMessage>,
{
#[overseer::subsystem(ApprovalDistribution, error=SubsystemError, prefix=self::overseer)]
impl<Context> ApprovalDistribution {
fn start(self, ctx: Context) -> SpawnedSubsystem {
let future = self.run(ctx).map(|_| Ok(())).boxed();
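
The hunks above repeatedly swap a blanket `SubsystemSender<AllMessages>` (or a doubly-stated `SubsystemContext` bound) for a single per-subsystem sender trait such as `overseer::ApprovalDistributionSenderTrait`. A minimal, self-contained Rust sketch of that pattern follows; every definition here is an illustrative stand-in, not the code the overseer proc-macro actually generates:

/// One concrete message type a subsystem may emit.
pub struct NetworkBridgeMessage(pub &'static str);

/// A sender that can deliver exactly one message type.
pub trait SubsystemSender<M> {
    fn send_message(&mut self, msg: M);
}

/// Per-subsystem umbrella trait: bounded only by the messages that the
/// subsystem declared as outgoing, so sending anything else fails to compile.
pub trait ApprovalDistributionSenderTrait: SubsystemSender<NetworkBridgeMessage> {}
impl<T: SubsystemSender<NetworkBridgeMessage>> ApprovalDistributionSenderTrait for T {}

/// Mirrors the refactored `modify_reputation`: a narrow sender instead of a
/// context bounded on the overseer-wide `AllMessages`.
fn modify_reputation(sender: &mut impl ApprovalDistributionSenderTrait) {
    sender.send_message(NetworkBridgeMessage("ReportPeer"));
}

/// A toy sender that records what was sent.
struct VecSender(Vec<&'static str>);
impl SubsystemSender<NetworkBridgeMessage> for VecSender {
    fn send_message(&mut self, msg: NetworkBridgeMessage) {
        self.0.push(msg.0);
    }
}

fn main() {
    let mut sender = VecSender(Vec::new());
    modify_reputation(&mut sender);
    assert_eq!(sender.0, vec!["ReportPeer"]);
}
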
@@ -21,7 +21,7 @@ use sp_keystore::SyncCryptoStorePtr;
use polkadot_node_network_protocol::request_response::{v1, IncomingRequestReceiver};
use polkadot_node_subsystem::{
messages::AvailabilityDistributionMessage, overseer, FromOverseer, OverseerSignal,
SpawnedSubsystem, SubsystemContext, SubsystemError,
SpawnedSubsystem, SubsystemError,
};

/// Error and [`Result`] type for this subsystem.
@@ -68,11 +68,8 @@ pub struct IncomingRequestReceivers {
pub chunk_req_receiver: IncomingRequestReceiver<v1::ChunkFetchingRequest>,
}

impl<Context> overseer::Subsystem<Context, SubsystemError> for AvailabilityDistributionSubsystem
where
Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
Context: overseer::SubsystemContext<Message = AvailabilityDistributionMessage>,
{
#[overseer::subsystem(AvailabilityDistribution, error=SubsystemError, prefix=self::overseer)]
impl<Context> AvailabilityDistributionSubsystem {
fn start(self, ctx: Context) -> SpawnedSubsystem {
let future = self
.run(ctx)
@@ -83,6 +80,7 @@ where
}
}

#[overseer::contextbounds(AvailabilityDistribution, prefix = self::overseer)]
impl AvailabilityDistributionSubsystem {
/// Create a new instance of the availability distribution.
pub fn new(
@@ -95,11 +93,7 @@ impl AvailabilityDistributionSubsystem {
}

/// Start processing work as passed on from the Overseer.
async fn run<Context>(self, mut ctx: Context) -> std::result::Result<(), FatalError>
where
Context: SubsystemContext<Message = AvailabilityDistributionMessage>,
Context: overseer::SubsystemContext<Message = AvailabilityDistributionMessage>,
{
async fn run<Context>(self, mut ctx: Context) -> std::result::Result<(), FatalError> {
let Self { mut runtime, recvs, metrics } = self;

let IncomingRequestReceivers { pov_req_receiver, chunk_req_receiver } = recvs;

@@ -27,7 +27,7 @@ use polkadot_node_primitives::PoV;
use polkadot_node_subsystem::{
jaeger,
messages::{IfDisconnected, NetworkBridgeMessage},
SubsystemContext,
overseer,
};
use polkadot_node_subsystem_util::runtime::RuntimeInfo;
use polkadot_primitives::v2::{AuthorityDiscoveryId, CandidateHash, Hash, ValidatorIndex};
@@ -39,6 +39,7 @@ use crate::{
};

/// Start background worker for taking care of fetching the requested `PoV` from the network.
#[overseer::contextbounds(AvailabilityDistribution, prefix = self::overseer)]
pub async fn fetch_pov<Context>(
ctx: &mut Context,
runtime: &mut RuntimeInfo,
@@ -48,10 +49,7 @@ pub async fn fetch_pov<Context>(
pov_hash: Hash,
tx: oneshot::Sender<PoV>,
metrics: Metrics,
) -> Result<()>
where
Context: SubsystemContext,
{
) -> Result<()> {
let info = &runtime.get_session_info(ctx.sender(), parent).await?.session_info;
let authority_id = info
.discovery_keys

@@ -30,8 +30,8 @@ use polkadot_node_network_protocol::request_response::{
use polkadot_node_primitives::ErasureChunk;
use polkadot_node_subsystem::{
jaeger,
messages::{AllMessages, AvailabilityStoreMessage, IfDisconnected, NetworkBridgeMessage},
SubsystemContext,
messages::{AvailabilityStoreMessage, IfDisconnected, NetworkBridgeMessage},
overseer,
};
use polkadot_primitives::v2::{
AuthorityDiscoveryId, BlakeTwo256, CandidateHash, GroupIndex, Hash, HashT, OccupiedCore,
@@ -84,7 +84,7 @@ enum FetchedState {
/// Messages sent from `FetchTask`s to be handled/forwarded.
pub enum FromFetchTask {
/// Message to other subsystem.
Message(AllMessages),
Message(overseer::AvailabilityDistributionOutgoingMessages),

/// Concluded with result.
///
@@ -171,14 +171,12 @@ impl FetchTaskConfig {
}
}

#[overseer::contextbounds(AvailabilityDistribution, prefix = self::overseer)]
impl FetchTask {
/// Start fetching a chunk.
///
/// A task handling the fetching of the configured chunk will be spawned.
pub async fn start<Context>(config: FetchTaskConfig, ctx: &mut Context) -> Result<Self>
where
Context: SubsystemContext,
{
pub async fn start<Context>(config: FetchTaskConfig, ctx: &mut Context) -> Result<Self> {
let FetchTaskConfig { prepared_running, live_in } = config;

if let Some(running) = prepared_running {
@@ -333,9 +331,10 @@ impl RunningTask {
let requests = Requests::ChunkFetchingV1(full_request);

self.sender
.send(FromFetchTask::Message(AllMessages::NetworkBridge(
NetworkBridgeMessage::SendRequests(vec![requests], IfDisconnected::ImmediateError),
)))
.send(FromFetchTask::Message(
NetworkBridgeMessage::SendRequests(vec![requests], IfDisconnected::ImmediateError)
.into(),
))
.await
.map_err(|_| TaskError::ShuttingDown)?;

@@ -413,13 +412,14 @@ impl RunningTask {
let (tx, rx) = oneshot::channel();
let r = self
.sender
.send(FromFetchTask::Message(AllMessages::AvailabilityStore(
.send(FromFetchTask::Message(
AvailabilityStoreMessage::StoreChunk {
candidate_hash: self.request.candidate_hash,
chunk,
tx,
},
)))
}
.into(),
))
.await;
if let Err(err) = r {
gum::error!(target: LOG_TARGET, err= ?err, "Storing erasure chunk failed, system shutting down?");

@@ -227,7 +227,11 @@ impl TestRun {

/// Returns true, if after processing of the given message it would be OK for the stream to
/// end.
async fn handle_message(&self, msg: AllMessages) -> bool {
async fn handle_message(
&self,
msg: overseer::AvailabilityDistributionOutgoingMessages,
) -> bool {
let msg = AllMessages::from(msg);
match msg {
AllMessages::NetworkBridge(NetworkBridgeMessage::SendRequests(
reqs,

@@ -33,8 +33,8 @@ use futures::{
};

use polkadot_node_subsystem::{
messages::{AllMessages, ChainApiMessage},
ActivatedLeaf, ActiveLeavesUpdate, LeafStatus, SubsystemContext,
messages::{ChainApiMessage, RuntimeApiMessage},
overseer, ActivatedLeaf, ActiveLeavesUpdate, LeafStatus,
};
use polkadot_node_subsystem_util::runtime::{get_occupied_cores, RuntimeInfo};
use polkadot_primitives::v2::{CandidateHash, Hash, OccupiedCore, SessionIndex};
@@ -78,6 +78,7 @@ pub struct Requester {
metrics: Metrics,
}

#[overseer::contextbounds(AvailabilityDistribution, prefix = self::overseer)]
impl Requester {
/// How many ancestors of the leaf should we consider along with it.
pub(crate) const LEAF_ANCESTRY_LEN_WITHIN_SESSION: usize = 3;
@@ -99,10 +100,7 @@ impl Requester {
ctx: &mut Context,
runtime: &mut RuntimeInfo,
update: ActiveLeavesUpdate,
) -> Result<()>
where
Context: SubsystemContext,
{
) -> Result<()> {
gum::trace!(target: LOG_TARGET, ?update, "Update fetching heads");
let ActiveLeavesUpdate { activated, deactivated } = update;
// Stale leaves happen after a reversion - we don't want to re-run availability there.
@@ -125,13 +123,11 @@ impl Requester {
ctx: &mut Context,
runtime: &mut RuntimeInfo,
new_head: ActivatedLeaf,
) -> Result<()>
where
Context: SubsystemContext,
{
) -> Result<()> {
let sender = &mut ctx.sender().clone();
let ActivatedLeaf { hash: leaf, .. } = new_head;
let (leaf_session_index, ancestors_in_session) = get_block_ancestors_in_same_session(
ctx,
sender,
runtime,
leaf,
Self::LEAF_ANCESTRY_LEN_WITHIN_SESSION,
@@ -139,7 +135,7 @@ impl Requester {
.await?;
// Also spawn or bump tasks for candidates in ancestry in the same session.
for hash in std::iter::once(leaf).chain(ancestors_in_session) {
let cores = get_occupied_cores(ctx, hash).await?;
let cores = get_occupied_cores(sender, hash).await?;
gum::trace!(
target: LOG_TARGET,
occupied_cores = ?cores,
@@ -177,15 +173,12 @@ impl Requester {
/// passed in leaf might be some later block where the candidate is still pending availability.
async fn add_cores<Context>(
&mut self,
ctx: &mut Context,
context: &mut Context,
runtime: &mut RuntimeInfo,
leaf: Hash,
leaf_session_index: SessionIndex,
cores: impl IntoIterator<Item = OccupiedCore>,
) -> Result<()>
where
Context: SubsystemContext,
{
) -> Result<()> {
for core in cores {
match self.fetches.entry(core.candidate_hash) {
Entry::Occupied(mut e) =>
@@ -200,7 +193,7 @@ impl Requester {
let task_cfg = self
.session_cache
.with_session_info(
ctx,
context,
runtime,
// We use leaf here, the relay_parent must be in the same session as the
// leaf. This is guaranteed by runtime which ensures that cores are cleared
@@ -221,7 +214,7 @@ impl Requester {
});

if let Ok(Some(task_cfg)) = task_cfg {
e.insert(FetchTask::start(task_cfg, ctx).await?);
e.insert(FetchTask::start(task_cfg, context).await?);
}
// Not a validator, nothing to do.
},
@@ -232,9 +225,9 @@ impl Requester {
}

impl Stream for Requester {
type Item = AllMessages;
type Item = overseer::AvailabilityDistributionOutgoingMessages;

fn poll_next(mut self: Pin<&mut Self>, ctx: &mut Context) -> Poll<Option<AllMessages>> {
fn poll_next(mut self: Pin<&mut Self>, ctx: &mut Context) -> Poll<Option<Self::Item>> {
loop {
match Pin::new(&mut self.rx).poll_next(ctx) {
Poll::Ready(Some(FromFetchTask::Message(m))) => return Poll::Ready(Some(m)),
@@ -257,26 +250,27 @@ impl Stream for Requester {
/// Requests up to `limit` ancestor hashes of relay parent in the same session.
///
/// Also returns session index of the `head`.
async fn get_block_ancestors_in_same_session<Context>(
ctx: &mut Context,
async fn get_block_ancestors_in_same_session<Sender>(
sender: &mut Sender,
runtime: &mut RuntimeInfo,
head: Hash,
limit: usize,
) -> Result<(SessionIndex, Vec<Hash>)>
where
Context: SubsystemContext,
Sender:
overseer::SubsystemSender<RuntimeApiMessage> + overseer::SubsystemSender<ChainApiMessage>,
{
// The order is parent, grandparent, ...
//
// `limit + 1` since a session index for the last element in ancestry
// is obtained through its parent. It always gets truncated because
// `session_ancestry_len` can only be incremented `ancestors.len() - 1` times.
let mut ancestors = get_block_ancestors(ctx, head, limit + 1).await?;
let mut ancestors = get_block_ancestors(sender, head, limit + 1).await?;
let mut ancestors_iter = ancestors.iter();

// `head` is the child of the first block in `ancestors`, request its session index.
let head_session_index = match ancestors_iter.next() {
Some(parent) => runtime.get_session_index_for_child(ctx.sender(), *parent).await?,
Some(parent) => runtime.get_session_index_for_child(sender, *parent).await?,
None => {
// No first element, i.e. empty.
return Ok((0, ancestors))
@@ -287,7 +281,7 @@ where
// The first parent is skipped.
for parent in ancestors_iter {
// Parent is the i-th ancestor, request session index for its child -- (i-1)th element.
let session_index = runtime.get_session_index_for_child(ctx.sender(), *parent).await?;
let session_index = runtime.get_session_index_for_child(sender, *parent).await?;
if session_index == head_session_index {
session_ancestry_len += 1;
} else {
@@ -302,21 +296,22 @@ where
}

/// Request up to `limit` ancestor hashes of relay parent from the Chain API.
async fn get_block_ancestors<Context>(
ctx: &mut Context,
async fn get_block_ancestors<Sender>(
sender: &mut Sender,
relay_parent: Hash,
limit: usize,
) -> Result<Vec<Hash>>
where
Context: SubsystemContext,
Sender: overseer::SubsystemSender<ChainApiMessage>,
{
let (tx, rx) = oneshot::channel();
ctx.send_message(ChainApiMessage::Ancestors {
hash: relay_parent,
k: limit,
response_channel: tx,
})
.await;
sender
.send_message(ChainApiMessage::Ancestors {
hash: relay_parent,
k: limit,
response_channel: tx,
})
.await;

let ancestors = rx
.await

@@ -19,7 +19,7 @@ use std::collections::HashSet;
use lru::LruCache;
use rand::{seq::SliceRandom, thread_rng};

use polkadot_node_subsystem::SubsystemContext;
use polkadot_node_subsystem::overseer;
use polkadot_node_subsystem_util::runtime::RuntimeInfo;
use polkadot_primitives::v2::{
AuthorityDiscoveryId, GroupIndex, Hash, SessionIndex, ValidatorIndex,
@@ -79,6 +79,7 @@ pub struct BadValidators {
pub bad_validators: Vec<AuthorityDiscoveryId>,
}

#[overseer::contextbounds(AvailabilityDistribution, prefix = self::overseer)]
impl SessionCache {
/// Create a new `SessionCache`.
pub fn new() -> Self {
@@ -103,7 +104,6 @@ impl SessionCache {
with_info: F,
) -> Result<Option<R>>
where
Context: SubsystemContext,
F: FnOnce(&SessionInfo) -> R,
{
if let Some(o_info) = self.session_info_cache.get(&session_index) {
@@ -178,10 +178,7 @@ impl SessionCache {
runtime: &mut RuntimeInfo,
relay_parent: Hash,
session_index: SessionIndex,
) -> Result<Option<SessionInfo>>
where
Context: SubsystemContext,
{
) -> Result<Option<SessionInfo>> {
let info = runtime
.get_session_info_by_index(ctx.sender(), relay_parent, session_index)
.await?;

@@ -43,7 +43,7 @@ pub async fn run_pov_receiver<Sender>(
mut receiver: IncomingRequestReceiver<v1::PoVFetchingRequest>,
metrics: Metrics,
) where
Sender: SubsystemSender,
Sender: SubsystemSender<AvailabilityStoreMessage>,
{
loop {
match receiver.recv(|| vec![COST_INVALID_REQUEST]).await.into_nested() {
@@ -71,7 +71,7 @@ pub async fn run_chunk_receiver<Sender>(
mut receiver: IncomingRequestReceiver<v1::ChunkFetchingRequest>,
metrics: Metrics,
) where
Sender: SubsystemSender,
Sender: SubsystemSender<AvailabilityStoreMessage>,
{
loop {
match receiver.recv(|| vec![COST_INVALID_REQUEST]).await.into_nested() {
@@ -105,7 +105,7 @@ pub async fn answer_pov_request_log<Sender>(
req: IncomingRequest<v1::PoVFetchingRequest>,
metrics: &Metrics,
) where
Sender: SubsystemSender,
Sender: SubsystemSender<AvailabilityStoreMessage>,
{
let res = answer_pov_request(sender, req).await;
match res {
@@ -130,7 +130,7 @@ pub async fn answer_chunk_request_log<Sender>(
metrics: &Metrics,
) -> ()
where
Sender: SubsystemSender,
Sender: SubsystemSender<AvailabilityStoreMessage>,
{
let res = answer_chunk_request(sender, req).await;
match res {
@@ -154,7 +154,7 @@ pub async fn answer_pov_request<Sender>(
req: IncomingRequest<v1::PoVFetchingRequest>,
) -> Result<bool>
where
Sender: SubsystemSender,
Sender: SubsystemSender<AvailabilityStoreMessage>,
{
let _span = jaeger::Span::new(req.payload.candidate_hash, "answer-pov-request");

@@ -182,7 +182,7 @@ pub async fn answer_chunk_request<Sender>(
req: IncomingRequest<v1::ChunkFetchingRequest>,
) -> Result<bool>
where
Sender: SubsystemSender,
Sender: SubsystemSender<AvailabilityStoreMessage>,
{
let span = jaeger::Span::new(req.payload.candidate_hash, "answer-chunk-request");

@@ -217,7 +217,7 @@ async fn query_chunk<Sender>(
validator_index: ValidatorIndex,
) -> std::result::Result<Option<ErasureChunk>, JfyiError>
where
Sender: SubsystemSender,
Sender: SubsystemSender<AvailabilityStoreMessage>,
{
let (tx, rx) = oneshot::channel();
sender
@@ -245,7 +245,7 @@ async fn query_available_data<Sender>(
candidate_hash: CandidateHash,
) -> Result<Option<AvailableData>>
where
Sender: SubsystemSender,
Sender: SubsystemSender<AvailabilityStoreMessage>,
{
let (tx, rx) = oneshot::channel();
sender
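
The `FromFetchTask::Message(.. .into())` and `AllMessages::from(msg)` conversions above rely on each subsystem getting its own enum of declared outgoing messages, which widens back into the overseer-wide enum via `From` only at the boundary. A self-contained sketch of that conversion layer (the enum contents here are invented placeholders, not the real message variants):

pub enum AvailabilityStoreMessage { StoreChunk }
pub enum NetworkBridgeMessage { SendRequests }

/// Generated per subsystem: the only messages it is allowed to send.
pub enum AvailabilityDistributionOutgoingMessages {
    AvailabilityStore(AvailabilityStoreMessage),
    NetworkBridge(NetworkBridgeMessage),
}

impl From<AvailabilityStoreMessage> for AvailabilityDistributionOutgoingMessages {
    fn from(m: AvailabilityStoreMessage) -> Self {
        Self::AvailabilityStore(m)
    }
}
impl From<NetworkBridgeMessage> for AvailabilityDistributionOutgoingMessages {
    fn from(m: NetworkBridgeMessage) -> Self {
        Self::NetworkBridge(m)
    }
}

/// The overseer-wide enum; tests widen back to it, as in
/// `let msg = AllMessages::from(msg);` in the hunks above.
pub enum AllMessages {
    AvailabilityStore(AvailabilityStoreMessage),
    NetworkBridge(NetworkBridgeMessage),
}

impl From<AvailabilityDistributionOutgoingMessages> for AllMessages {
    fn from(m: AvailabilityDistributionOutgoingMessages) -> Self {
        match m {
            AvailabilityDistributionOutgoingMessages::AvailabilityStore(x) =>
                AllMessages::AvailabilityStore(x),
            AvailabilityDistributionOutgoingMessages::NetworkBridge(x) =>
                AllMessages::NetworkBridge(x),
        }
    }
}

fn main() {
    // `.into()` at the send site picks the narrow enum, mirroring
    // `FromFetchTask::Message(NetworkBridgeMessage::SendRequests(..).into())`.
    let narrow: AvailabilityDistributionOutgoingMessages =
        NetworkBridgeMessage::SendRequests.into();
    let _wide = AllMessages::from(narrow);
}
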
@@ -51,9 +51,8 @@ use polkadot_node_subsystem::{
errors::RecoveryError,
jaeger,
messages::{AvailabilityRecoveryMessage, AvailabilityStoreMessage, NetworkBridgeMessage},
overseer::{self, Subsystem},
ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext,
SubsystemError, SubsystemResult, SubsystemSender,
overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemError,
SubsystemResult,
};
use polkadot_node_subsystem_util::request_session_info;
use polkadot_primitives::v2::{
@@ -156,8 +155,8 @@ enum Source {

/// A stateful reconstruction of availability data in reference to
/// a candidate hash.
struct RecoveryTask<S> {
sender: S,
struct RecoveryTask<Sender> {
sender: Sender,

/// The parameters of the recovery process.
params: RecoveryParams,
@@ -177,7 +176,7 @@ impl RequestFromBackers {
async fn run(
&mut self,
params: &RecoveryParams,
sender: &mut impl SubsystemSender,
sender: &mut impl overseer::AvailabilityRecoverySenderTrait,
) -> Result<AvailableData, RecoveryError> {
gum::trace!(
target: LOG_TARGET,
@@ -199,13 +198,10 @@ impl RequestFromBackers {
);

sender
.send_message(
NetworkBridgeMessage::SendRequests(
vec![Requests::AvailableDataFetchingV1(req)],
IfDisconnected::ImmediateError,
)
.into(),
)
.send_message(NetworkBridgeMessage::SendRequests(
vec![Requests::AvailableDataFetchingV1(req)],
IfDisconnected::ImmediateError,
))
.await;

match response.await {
@@ -298,11 +294,13 @@ impl RequestChunksFromValidators {
)
}

async fn launch_parallel_requests(
async fn launch_parallel_requests<Sender>(
&mut self,
params: &RecoveryParams,
sender: &mut impl SubsystemSender,
) {
sender: &mut Sender,
) where
Sender: overseer::AvailabilityRecoverySenderTrait,
{
let num_requests = self.get_desired_request_count(params.threshold);
let candidate_hash = &params.candidate_hash;
let already_requesting_count = self.requesting_chunks.len();
@@ -358,9 +356,10 @@ impl RequestChunksFromValidators {
}

sender
.send_message(
NetworkBridgeMessage::SendRequests(requests, IfDisconnected::ImmediateError).into(),
)
.send_message(NetworkBridgeMessage::SendRequests(
requests,
IfDisconnected::ImmediateError,
))
.await;
}

@@ -483,20 +482,21 @@ impl RequestChunksFromValidators {
}
}

async fn run(
async fn run<Sender>(
&mut self,
params: &RecoveryParams,
sender: &mut impl SubsystemSender,
) -> Result<AvailableData, RecoveryError> {
sender: &mut Sender,
) -> Result<AvailableData, RecoveryError>
where
Sender: overseer::AvailabilityRecoverySenderTrait,
{
let metrics = &params.metrics;

// First query the store for any chunks we've got.
{
let (tx, rx) = oneshot::channel();
sender
.send_message(
AvailabilityStoreMessage::QueryAllChunks(params.candidate_hash, tx).into(),
)
.send_message(AvailabilityStoreMessage::QueryAllChunks(params.candidate_hash, tx))
.await;

match rx.await {
@@ -646,16 +646,19 @@ fn reconstructed_data_matches_root(
branches.root() == *expected_root
}

impl<S: SubsystemSender> RecoveryTask<S> {
impl<Sender> RecoveryTask<Sender>
where
Sender: overseer::AvailabilityRecoverySenderTrait,
{
async fn run(mut self) -> Result<AvailableData, RecoveryError> {
// First just see if we have the data available locally.
{
let (tx, rx) = oneshot::channel();
self.sender
.send_message(
AvailabilityStoreMessage::QueryAvailableData(self.params.candidate_hash, tx)
.into(),
)
.send_message(AvailabilityStoreMessage::QueryAvailableData(
self.params.candidate_hash,
tx,
))
.await;

match rx.await {
@@ -799,11 +802,8 @@ impl Default for State {
}
}

impl<Context> Subsystem<Context, SubsystemError> for AvailabilityRecoverySubsystem
where
Context: SubsystemContext<Message = AvailabilityRecoveryMessage>,
Context: overseer::SubsystemContext<Message = AvailabilityRecoveryMessage>,
{
#[overseer::subsystem(AvailabilityRecovery, error=SubsystemError, prefix=self::overseer)]
impl<Context> AvailabilityRecoverySubsystem {
fn start(self, ctx: Context) -> SpawnedSubsystem {
let future = self
.run(ctx)
@@ -832,6 +832,7 @@ async fn handle_signal(state: &mut State, signal: OverseerSignal) -> SubsystemRe
}

/// Machinery around launching recovery tasks into the background.
#[overseer::contextbounds(AvailabilityRecovery, prefix = self::overseer)]
async fn launch_recovery_task<Context>(
state: &mut State,
ctx: &mut Context,
@@ -840,11 +841,7 @@ async fn launch_recovery_task<Context>(
backing_group: Option<GroupIndex>,
response_sender: oneshot::Sender<Result<AvailableData, RecoveryError>>,
metrics: &Metrics,
) -> error::Result<()>
where
Context: SubsystemContext<Message = AvailabilityRecoveryMessage>,
Context: overseer::SubsystemContext<Message = AvailabilityRecoveryMessage>,
{
) -> error::Result<()> {
let candidate_hash = receipt.hash();

let params = RecoveryParams {
@@ -885,6 +882,7 @@ where
}

/// Handles an availability recovery request.
#[overseer::contextbounds(AvailabilityRecovery, prefix = self::overseer)]
async fn handle_recover<Context>(
state: &mut State,
ctx: &mut Context,
@@ -893,11 +891,7 @@ async fn handle_recover<Context>(
backing_group: Option<GroupIndex>,
response_sender: oneshot::Sender<Result<AvailableData, RecoveryError>>,
metrics: &Metrics,
) -> error::Result<()>
where
Context: SubsystemContext<Message = AvailabilityRecoveryMessage>,
Context: overseer::SubsystemContext<Message = AvailabilityRecoveryMessage>,
{
) -> error::Result<()> {
let candidate_hash = receipt.hash();

let span = jaeger::Span::new(candidate_hash, "availbility-recovery")
@@ -953,14 +947,11 @@ where
}

/// Queries a chunk from av-store.
#[overseer::contextbounds(AvailabilityRecovery, prefix = self::overseer)]
async fn query_full_data<Context>(
ctx: &mut Context,
candidate_hash: CandidateHash,
) -> error::Result<Option<AvailableData>>
where
Context: SubsystemContext<Message = AvailabilityRecoveryMessage>,
Context: overseer::SubsystemContext<Message = AvailabilityRecoveryMessage>,
{
) -> error::Result<Option<AvailableData>> {
let (tx, rx) = oneshot::channel();
ctx.send_message(AvailabilityStoreMessage::QueryAvailableData(candidate_hash, tx))
.await;
@@ -968,6 +959,7 @@ where
Ok(rx.await.map_err(error::Error::CanceledQueryFullData)?)
}

#[overseer::contextbounds(AvailabilityRecovery, prefix = self::overseer)]
impl AvailabilityRecoverySubsystem {
/// Create a new instance of `AvailabilityRecoverySubsystem` which starts with a fast path to
/// request data from backers.
@@ -986,11 +978,7 @@ impl AvailabilityRecoverySubsystem {
Self { fast_path: false, req_receiver, metrics }
}

async fn run<Context>(self, mut ctx: Context) -> SubsystemResult<()>
where
Context: SubsystemContext<Message = AvailabilityRecoveryMessage>,
Context: overseer::SubsystemContext<Message = AvailabilityRecoveryMessage>,
{
async fn run<Context>(self, mut ctx: Context) -> SubsystemResult<()> {
let mut state = State::default();
let Self { fast_path, mut req_receiver, metrics } = self;
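
The `#[overseer::contextbounds(..)]` attribute that keeps appearing above is what lets each method drop its hand-written `where Context: ...` clauses: the macro re-attaches the subsystem's context bounds to every generic `Context` parameter. A rough hand expansion in Rust follows; the trait and the expansion shown are illustrative assumptions, not the macro's actual output:

/// A cut-down stand-in for the real subsystem context trait.
trait SubsystemContext {
    type Message;
    fn recv(&mut self) -> Self::Message;
}

struct AvailabilityRecoveryMessage;

// What the source now writes:
//
//     #[overseer::contextbounds(AvailabilityRecovery, prefix = self::overseer)]
//     async fn query_full_data<Context>(ctx: &mut Context, ..) -> .. { .. }
//
// and roughly what the macro expands it to:
fn query_full_data<Context>(ctx: &mut Context)
where
    Context: SubsystemContext<Message = AvailabilityRecoveryMessage>,
{
    // The bound is back, so `ctx.recv()` and friends type-check as before.
    let _msg: AvailabilityRecoveryMessage = ctx.recv();
}

fn main() {
    // A toy context to show the expanded function is callable.
    struct TestCtx;
    impl SubsystemContext for TestCtx {
        type Message = AvailabilityRecoveryMessage;
        fn recv(&mut self) -> Self::Message {
            AvailabilityRecoveryMessage
        }
    }
    query_full_data(&mut TestCtx);
}
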
@@ -31,7 +31,7 @@ use polkadot_node_network_protocol::{
|
||||
};
|
||||
use polkadot_node_subsystem::{
|
||||
jaeger, messages::*, overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, PerLeafSpan,
|
||||
SpawnedSubsystem, SubsystemContext, SubsystemError, SubsystemResult,
|
||||
SpawnedSubsystem, SubsystemError, SubsystemResult,
|
||||
};
|
||||
use polkadot_node_subsystem_util::{self as util};
|
||||
use polkadot_primitives::v2::{
|
||||
@@ -204,6 +204,7 @@ pub struct BitfieldDistribution {
|
||||
metrics: Metrics,
|
||||
}
|
||||
|
||||
#[overseer::contextbounds(BitfieldDistribution, prefix = self::overseer)]
|
||||
impl BitfieldDistribution {
|
||||
/// Create a new instance of the `BitfieldDistribution` subsystem.
|
||||
pub fn new(metrics: Metrics) -> Self {
|
||||
@@ -211,11 +212,7 @@ impl BitfieldDistribution {
|
||||
}
|
||||
|
||||
/// Start processing work as passed on from the Overseer.
|
||||
async fn run<Context>(self, ctx: Context)
|
||||
where
|
||||
Context: SubsystemContext<Message = BitfieldDistributionMessage>,
|
||||
Context: overseer::SubsystemContext<Message = BitfieldDistributionMessage>,
|
||||
{
|
||||
async fn run<Context>(self, ctx: Context) {
|
||||
let mut state = ProtocolState::default();
|
||||
let mut rng = rand::rngs::StdRng::from_entropy();
|
||||
self.run_inner(ctx, &mut state, &mut rng).await
|
||||
@@ -226,10 +223,7 @@ impl BitfieldDistribution {
|
||||
mut ctx: Context,
|
||||
state: &mut ProtocolState,
|
||||
rng: &mut (impl CryptoRng + Rng),
|
||||
) where
|
||||
Context: SubsystemContext<Message = BitfieldDistributionMessage>,
|
||||
Context: overseer::SubsystemContext<Message = BitfieldDistributionMessage>,
|
||||
{
|
||||
) {
|
||||
// work: process incoming messages from the overseer and process accordingly.
|
||||
|
||||
loop {
|
||||
@@ -316,17 +310,20 @@ impl BitfieldDistribution {
|
||||
}
|
||||
|
||||
/// Modify the reputation of a peer based on its behavior.
|
||||
async fn modify_reputation<Context>(ctx: &mut Context, relay_parent: Hash, peer: PeerId, rep: Rep)
|
||||
where
|
||||
Context: SubsystemContext<Message = BitfieldDistributionMessage>,
|
||||
{
|
||||
async fn modify_reputation(
|
||||
sender: &mut impl overseer::BitfieldDistributionSenderTrait,
|
||||
relay_parent: Hash,
|
||||
peer: PeerId,
|
||||
rep: Rep,
|
||||
) {
|
||||
gum::trace!(target: LOG_TARGET, ?relay_parent, ?rep, %peer, "reputation change");
|
||||
|
||||
ctx.send_message(NetworkBridgeMessage::ReportPeer(peer, rep)).await
|
||||
sender.send_message(NetworkBridgeMessage::ReportPeer(peer, rep)).await
|
||||
}
|
||||
/// Distribute a given valid and signature checked bitfield message.
|
||||
///
|
||||
/// For this variant the source is this node.
|
||||
#[overseer::contextbounds(BitfieldDistribution, prefix=self::overseer)]
|
||||
async fn handle_bitfield_distribution<Context>(
|
||||
ctx: &mut Context,
|
||||
state: &mut ProtocolState,
|
||||
@@ -334,9 +331,7 @@ async fn handle_bitfield_distribution<Context>(
|
||||
relay_parent: Hash,
|
||||
signed_availability: SignedAvailabilityBitfield,
|
||||
rng: &mut (impl CryptoRng + Rng),
|
||||
) where
|
||||
Context: SubsystemContext<Message = BitfieldDistributionMessage>,
|
||||
{
|
||||
) {
|
||||
let _timer = metrics.time_handle_bitfield_distribution();
|
||||
|
||||
// Ignore anything the overseer did not tell this subsystem to work on
|
||||
@@ -389,6 +384,7 @@ async fn handle_bitfield_distribution<Context>(
|
||||
/// Distribute a given valid and signature checked bitfield message.
|
||||
///
|
||||
/// Can be originated by another subsystem or received via network from another peer.
|
||||
#[overseer::contextbounds(BitfieldDistribution, prefix=self::overseer)]
|
||||
async fn relay_message<Context>(
|
||||
ctx: &mut Context,
|
||||
job_data: &mut PerRelayParentData,
|
||||
@@ -398,9 +394,7 @@ async fn relay_message<Context>(
|
||||
message: BitfieldGossipMessage,
|
||||
required_routing: RequiredRouting,
|
||||
rng: &mut (impl CryptoRng + Rng),
|
||||
) where
|
||||
Context: SubsystemContext<Message = BitfieldDistributionMessage>,
|
||||
{
|
||||
) {
|
||||
let relay_parent = message.relay_parent;
|
||||
let span = job_data.span.child("relay-msg");
|
||||
|
||||
@@ -478,6 +472,7 @@ async fn relay_message<Context>(
|
||||
}
|
||||
|
||||
/// Handle an incoming message from a peer.
|
||||
#[overseer::contextbounds(BitfieldDistribution, prefix=self::overseer)]
|
||||
async fn process_incoming_peer_message<Context>(
|
||||
ctx: &mut Context,
|
||||
state: &mut ProtocolState,
|
||||
@@ -485,9 +480,7 @@ async fn process_incoming_peer_message<Context>(
|
||||
origin: PeerId,
|
||||
message: protocol_v1::BitfieldDistributionMessage,
|
||||
rng: &mut (impl CryptoRng + Rng),
|
||||
) where
|
||||
Context: SubsystemContext<Message = BitfieldDistributionMessage>,
|
||||
{
|
||||
) {
|
||||
let protocol_v1::BitfieldDistributionMessage::Bitfield(relay_parent, bitfield) = message;
|
||||
gum::trace!(
|
||||
target: LOG_TARGET,
|
||||
@@ -497,7 +490,7 @@ async fn process_incoming_peer_message<Context>(
|
||||
);
|
||||
// we don't care about this, not part of our view.
|
||||
if !state.view.contains(&relay_parent) {
|
||||
modify_reputation(ctx, relay_parent, origin, COST_NOT_IN_VIEW).await;
|
||||
modify_reputation(ctx.sender(), relay_parent, origin, COST_NOT_IN_VIEW).await;
|
||||
return
|
||||
}
|
||||
|
||||
@@ -506,7 +499,7 @@ async fn process_incoming_peer_message<Context>(
|
||||
let job_data: &mut _ = if let Some(ref mut job_data) = job_data {
|
||||
job_data
|
||||
} else {
|
||||
modify_reputation(ctx, relay_parent, origin, COST_NOT_IN_VIEW).await;
|
||||
modify_reputation(ctx.sender(), relay_parent, origin, COST_NOT_IN_VIEW).await;
|
||||
return
|
||||
};
|
||||
|
||||
@@ -523,7 +516,7 @@ async fn process_incoming_peer_message<Context>(
|
||||
let validator_set = &job_data.validator_set;
|
||||
if validator_set.is_empty() {
|
||||
gum::trace!(target: LOG_TARGET, ?relay_parent, ?origin, "Validator set is empty",);
|
||||
modify_reputation(ctx, relay_parent, origin, COST_MISSING_PEER_SESSION_KEY).await;
|
||||
modify_reputation(ctx.sender(), relay_parent, origin, COST_MISSING_PEER_SESSION_KEY).await;
|
||||
return
|
||||
}
|
||||
|
||||
@@ -533,7 +526,7 @@ async fn process_incoming_peer_message<Context>(
|
||||
let validator = if let Some(validator) = validator_set.get(validator_index.0 as usize) {
|
||||
validator.clone()
|
||||
} else {
|
||||
modify_reputation(ctx, relay_parent, origin, COST_VALIDATOR_INDEX_INVALID).await;
|
||||
modify_reputation(ctx.sender(), relay_parent, origin, COST_VALIDATOR_INDEX_INVALID).await;
|
||||
return
|
||||
};
|
||||
|
||||
@@ -546,7 +539,7 @@ async fn process_incoming_peer_message<Context>(
|
||||
received_set.insert(validator.clone());
|
||||
} else {
|
||||
gum::trace!(target: LOG_TARGET, ?validator_index, ?origin, "Duplicate message");
|
||||
modify_reputation(ctx, relay_parent, origin, COST_PEER_DUPLICATE_MESSAGE).await;
|
||||
modify_reputation(ctx.sender(), relay_parent, origin, COST_PEER_DUPLICATE_MESSAGE).await;
|
||||
return
|
||||
};
|
||||
|
||||
@@ -560,13 +553,13 @@ async fn process_incoming_peer_message<Context>(
|
||||
"already received a message for validator",
|
||||
);
|
||||
if old_message.signed_availability.as_unchecked() == &bitfield {
|
||||
modify_reputation(ctx, relay_parent, origin, BENEFIT_VALID_MESSAGE).await;
|
||||
modify_reputation(ctx.sender(), relay_parent, origin, BENEFIT_VALID_MESSAGE).await;
|
||||
}
|
||||
return
|
||||
}
|
||||
let signed_availability = match bitfield.try_into_checked(&signing_context, &validator) {
|
||||
Err(_) => {
|
||||
modify_reputation(ctx, relay_parent, origin, COST_SIGNATURE_INVALID).await;
|
||||
modify_reputation(ctx.sender(), relay_parent, origin, COST_SIGNATURE_INVALID).await;
|
||||
return
|
||||
},
|
||||
Ok(bitfield) => bitfield,
|
||||
@@ -592,20 +585,19 @@ async fn process_incoming_peer_message<Context>(
|
||||
)
|
||||
.await;
|
||||
|
||||
modify_reputation(ctx, relay_parent, origin, BENEFIT_VALID_MESSAGE_FIRST).await
|
||||
modify_reputation(ctx.sender(), relay_parent, origin, BENEFIT_VALID_MESSAGE_FIRST).await
|
||||
}
|
||||
|
||||
/// Deal with network bridge updates and track what needs to be tracked
|
||||
/// which depends on the message type received.
|
||||
#[overseer::contextbounds(BitfieldDistribution, prefix=self::overseer)]
|
||||
async fn handle_network_msg<Context>(
|
||||
ctx: &mut Context,
|
||||
state: &mut ProtocolState,
|
||||
metrics: &Metrics,
|
||||
bridge_message: NetworkBridgeEvent<net_protocol::BitfieldDistributionMessage>,
|
||||
rng: &mut (impl CryptoRng + Rng),
|
||||
) where
|
||||
Context: SubsystemContext<Message = BitfieldDistributionMessage>,
|
||||
{
|
||||
) {
|
||||
let _timer = metrics.time_handle_network_msg();
|
||||
|
||||
match bridge_message {
|
||||
@@ -677,15 +669,14 @@ fn handle_our_view_change(state: &mut ProtocolState, view: OurView) {
|
||||
|
||||
// Send the difference between two views which were not sent
|
||||
// to that particular peer.
|
||||
#[overseer::contextbounds(BitfieldDistribution, prefix=self::overseer)]
|
||||
async fn handle_peer_view_change<Context>(
|
||||
ctx: &mut Context,
|
||||
state: &mut ProtocolState,
|
||||
origin: PeerId,
|
||||
view: View,
|
||||
rng: &mut (impl CryptoRng + Rng),
|
||||
) where
|
||||
Context: SubsystemContext<Message = BitfieldDistributionMessage>,
|
||||
{
|
||||
) {
|
||||
let added = state
|
||||
.peer_views
|
||||
.entry(origin.clone())
|
||||
@@ -736,15 +727,14 @@ async fn handle_peer_view_change<Context>(
}

/// Send a gossip message and track it in the per relay parent data.
#[overseer::contextbounds(BitfieldDistribution, prefix=self::overseer)]
async fn send_tracked_gossip_message<Context>(
ctx: &mut Context,
state: &mut ProtocolState,
dest: PeerId,
validator: ValidatorId,
message: BitfieldGossipMessage,
) where
Context: SubsystemContext<Message = BitfieldDistributionMessage>,
{
) {
let job_data = if let Some(job_data) = state.per_relay_parent.get_mut(&message.relay_parent) {
job_data
} else {
@@ -773,11 +763,8 @@ async fn send_tracked_gossip_message<Context>(
.await;
}

impl<Context> overseer::Subsystem<Context, SubsystemError> for BitfieldDistribution
where
Context: SubsystemContext<Message = BitfieldDistributionMessage>,
Context: overseer::SubsystemContext<Message = BitfieldDistributionMessage>,
{
#[overseer::subsystem(BitfieldDistribution, error=SubsystemError, prefix=self::overseer)]
impl<Context> BitfieldDistribution {
fn start(self, ctx: Context) -> SpawnedSubsystem {
let future = self.run(ctx).map(|_| Ok(())).boxed();

@@ -786,13 +773,11 @@ where
}

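Similarly, `#[overseer::subsystem(...)]` turns a plain inherent `impl BitfieldDistribution { fn start(...) }` into the `overseer::Subsystem` trait impl that the manual `impl ... where` block used to provide. A toy sketch of the assumed expansion (names illustrative; the real macro lives in the overseer-gen crate):

// Sketch of the assumed expansion: the attribute lifts the inherent `start`
// into the trait impl the overseer requires.
struct SpawnedSubsystem {
    name: &'static str,
}
struct Ctx;

trait Subsystem<Context> {
    fn start(self, ctx: Context) -> SpawnedSubsystem;
}

struct BitfieldDistribution;

// What the developer writes under the attribute...
impl BitfieldDistribution {
    fn start(self, _ctx: Ctx) -> SpawnedSubsystem {
        SpawnedSubsystem { name: "bitfield-distribution" }
    }
}

// ...and the impl the macro is assumed to generate from it.
impl Subsystem<Ctx> for BitfieldDistribution {
    fn start(self, ctx: Ctx) -> SpawnedSubsystem {
        // Method-call syntax resolves to the inherent `start` above.
        self.start(ctx)
    }
}

fn main() {
    let spawned = <BitfieldDistribution as Subsystem<Ctx>>::start(BitfieldDistribution, Ctx);
    assert_eq!(spawned.name, "bitfield-distribution");
}
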
/// Query our validator set and signing context for a particular relay parent.
#[overseer::contextbounds(BitfieldDistribution, prefix=self::overseer)]
async fn query_basics<Context>(
ctx: &mut Context,
relay_parent: Hash,
) -> SubsystemResult<Option<(Vec<ValidatorId>, SigningContext)>>
where
Context: SubsystemContext<Message = BitfieldDistributionMessage>,
{
) -> SubsystemResult<Option<(Vec<ValidatorId>, SigningContext)>> {
let (validators_tx, validators_rx) = oneshot::channel();
let (session_tx, session_rx) = oneshot::channel();


@@ -33,17 +33,18 @@ use polkadot_node_network_protocol::{
v1 as protocol_v1, ObservedRole, OurView, PeerId, ProtocolVersion,
UnifiedReputationChange as Rep, Versioned, View,
};

use polkadot_node_subsystem::{
errors::{SubsystemError, SubsystemResult},
messages::{
network_bridge_event::{NewGossipTopology, TopologyPeerInfo},
AllMessages, CollatorProtocolMessage, NetworkBridgeEvent, NetworkBridgeMessage,
ApprovalDistributionMessage, BitfieldDistributionMessage, CollatorProtocolMessage,
GossipSupportMessage, NetworkBridgeEvent, NetworkBridgeMessage,
StatementDistributionMessage,
},
overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem,
SubsystemContext, SubsystemSender,
};
use polkadot_node_subsystem_util::metrics::{self, prometheus};
use polkadot_overseer::gen::{OverseerError, Subsystem};
use polkadot_overseer::gen::OverseerError;
use polkadot_primitives::v2::{AuthorityDiscoveryId, BlockNumber, Hash, ValidatorIndex};

/// Peer set info for network initialization.
@@ -67,6 +68,9 @@ use network::{send_message, Network};

use crate::network::get_peer_id_by_authority_id;

mod metrics;
use self::metrics::Metrics;

#[cfg(test)]
mod tests;

@@ -83,206 +87,6 @@ const EMPTY_VIEW_COST: Rep = Rep::CostMajor("Peer sent us an empty view");
// network bridge log target
const LOG_TARGET: &'static str = "parachain::network-bridge";

/// Metrics for the network bridge.
#[derive(Clone, Default)]
pub struct Metrics(Option<MetricsInner>);

fn peer_set_label(peer_set: PeerSet, version: ProtocolVersion) -> &'static str {
// Higher level code is meant to protect against this ever happening.
peer_set.get_protocol_name_static(version).unwrap_or("<internal error>")
}

impl Metrics {
fn on_peer_connected(&self, peer_set: PeerSet, version: ProtocolVersion) {
self.0.as_ref().map(|metrics| {
metrics
.connected_events
.with_label_values(&[peer_set_label(peer_set, version)])
.inc()
});
}

fn on_peer_disconnected(&self, peer_set: PeerSet, version: ProtocolVersion) {
self.0.as_ref().map(|metrics| {
metrics
.disconnected_events
.with_label_values(&[peer_set_label(peer_set, version)])
.inc()
});
}

fn note_peer_count(&self, peer_set: PeerSet, version: ProtocolVersion, count: usize) {
self.0.as_ref().map(|metrics| {
metrics
.peer_count
.with_label_values(&[peer_set_label(peer_set, version)])
.set(count as u64)
});
}

fn on_notification_received(&self, peer_set: PeerSet, version: ProtocolVersion, size: usize) {
if let Some(metrics) = self.0.as_ref() {
metrics
.notifications_received
.with_label_values(&[peer_set_label(peer_set, version)])
.inc();

metrics
.bytes_received
.with_label_values(&[peer_set_label(peer_set, version)])
.inc_by(size as u64);
}
}

fn on_notification_sent(
&self,
peer_set: PeerSet,
version: ProtocolVersion,
size: usize,
to_peers: usize,
) {
if let Some(metrics) = self.0.as_ref() {
metrics
.notifications_sent
.with_label_values(&[peer_set_label(peer_set, version)])
.inc_by(to_peers as u64);

metrics
.bytes_sent
.with_label_values(&[peer_set_label(peer_set, version)])
.inc_by((size * to_peers) as u64);
}
}

fn note_desired_peer_count(&self, peer_set: PeerSet, size: usize) {
self.0.as_ref().map(|metrics| {
metrics
.desired_peer_count
.with_label_values(&[peer_set.get_default_protocol_name()])
.set(size as u64)
});
}

fn on_report_event(&self) {
if let Some(metrics) = self.0.as_ref() {
metrics.report_events.inc()
}
}
}

#[derive(Clone)]
struct MetricsInner {
peer_count: prometheus::GaugeVec<prometheus::U64>,
connected_events: prometheus::CounterVec<prometheus::U64>,
disconnected_events: prometheus::CounterVec<prometheus::U64>,
desired_peer_count: prometheus::GaugeVec<prometheus::U64>,
report_events: prometheus::Counter<prometheus::U64>,

notifications_received: prometheus::CounterVec<prometheus::U64>,
notifications_sent: prometheus::CounterVec<prometheus::U64>,

bytes_received: prometheus::CounterVec<prometheus::U64>,
bytes_sent: prometheus::CounterVec<prometheus::U64>,
}

impl metrics::Metrics for Metrics {
fn try_register(
registry: &prometheus::Registry,
) -> std::result::Result<Self, prometheus::PrometheusError> {
let metrics = MetricsInner {
peer_count: prometheus::register(
prometheus::GaugeVec::new(
prometheus::Opts::new(
"polkadot_parachain_peer_count",
"The number of peers on a parachain-related peer-set",
),
&["protocol"]
)?,
registry,
)?,
connected_events: prometheus::register(
prometheus::CounterVec::new(
prometheus::Opts::new(
"polkadot_parachain_peer_connect_events_total",
"The number of peer connect events on a parachain notifications protocol",
),
&["protocol"]
)?,
registry,
)?,
disconnected_events: prometheus::register(
prometheus::CounterVec::new(
prometheus::Opts::new(
"polkadot_parachain_peer_disconnect_events_total",
"The number of peer disconnect events on a parachain notifications protocol",
),
&["protocol"]
)?,
registry,
)?,
desired_peer_count: prometheus::register(
prometheus::GaugeVec::new(
prometheus::Opts::new(
"polkadot_parachain_desired_peer_count",
"The number of peers that the local node is expected to connect to on a parachain-related peer-set (either including or not including unresolvable authorities, depending on whether `ConnectToValidators` or `ConnectToValidatorsResolved` was used.)",
),
&["protocol"]
)?,
registry,
)?,
report_events: prometheus::register(
prometheus::Counter::new(
"polkadot_parachain_network_report_events_total",
"The amount of reputation changes issued by subsystems",
)?,
registry,
)?,
notifications_received: prometheus::register(
prometheus::CounterVec::new(
prometheus::Opts::new(
"polkadot_parachain_notifications_received_total",
"The number of notifications received on a parachain protocol",
),
&["protocol"]
)?,
registry,
)?,
notifications_sent: prometheus::register(
prometheus::CounterVec::new(
prometheus::Opts::new(
"polkadot_parachain_notifications_sent_total",
"The number of notifications sent on a parachain protocol",
),
&["protocol"]
)?,
registry,
)?,
bytes_received: prometheus::register(
prometheus::CounterVec::new(
prometheus::Opts::new(
"polkadot_parachain_notification_bytes_received_total",
"The number of bytes received on a parachain notification protocol",
),
&["protocol"]
)?,
registry,
)?,
bytes_sent: prometheus::register(
prometheus::CounterVec::new(
prometheus::Opts::new(
"polkadot_parachain_notification_bytes_sent_total",
"The number of bytes sent on a parachain notification protocol",
),
&["protocol"]
)?,
registry,
)?,
};

Ok(Metrics(Some(metrics)))
}
}

/// Messages from and to the network.
///
/// As transmitted to and received from subsystems.
@@ -320,12 +124,11 @@ impl<N, AD> NetworkBridge<N, AD> {
}
}

impl<Net, AD, Context> Subsystem<Context, SubsystemError> for NetworkBridge<Net, AD>
#[overseer::subsystem(NetworkBridge, error = SubsystemError, prefix = self::overseer)]
impl<Net, AD, Context> NetworkBridge<Net, AD>
where
Net: Network + Sync,
AD: validator_discovery::AuthorityDiscovery + Clone,
Context: SubsystemContext<Message = NetworkBridgeMessage>
+ overseer::SubsystemContext<Message = NetworkBridgeMessage>,
{
fn start(mut self, ctx: Context) -> SpawnedSubsystem {
// The stream of networking events has to be created at initialization, otherwise the
@@ -382,6 +185,7 @@ enum Mode {
Active,
}

#[overseer::contextbounds(NetworkBridge, prefix = self::overseer)]
async fn handle_subsystem_messages<Context, N, AD>(
mut ctx: Context,
mut network_service: N,
@@ -391,8 +195,6 @@ async fn handle_subsystem_messages<Context, N, AD>(
metrics: Metrics,
) -> Result<(), UnexpectedAbort>
where
Context: SubsystemContext<Message = NetworkBridgeMessage>,
Context: overseer::SubsystemContext<Message = NetworkBridgeMessage>,
N: Network,
AD: validator_discovery::AuthorityDiscovery + Clone,
{
@@ -678,7 +480,7 @@ where
}

async fn handle_network_messages<AD: validator_discovery::AuthorityDiscovery>(
mut sender: impl SubsystemSender,
mut sender: impl overseer::NetworkBridgeSenderTrait,
mut network_service: impl Network,
network_stream: BoxStream<'static, NetworkEvent>,
mut authority_discovery_service: AD,
@@ -1031,6 +833,7 @@ async fn handle_network_messages<AD: validator_discovery::AuthorityDiscovery>(
/// #fn is_send<T: Send>();
/// #is_send::<parking_lot::MutexGuard<'static, ()>();
/// ```
#[overseer::contextbounds(NetworkBridge, prefix = self::overseer)]
async fn run_network<N, AD, Context>(
bridge: NetworkBridge<N, AD>,
mut ctx: Context,
@@ -1039,8 +842,6 @@ async fn run_network<N, AD, Context>(
where
N: Network,
AD: validator_discovery::AuthorityDiscovery + Clone,
Context: SubsystemContext<Message = NetworkBridgeMessage>
+ overseer::SubsystemContext<Message = NetworkBridgeMessage>,
{
let shared = Shared::default();

@@ -1105,14 +906,17 @@ fn construct_view(
View::new(live_heads.take(MAX_VIEW_HEADS), finalized_number)
}

fn update_our_view(
net: &mut impl Network,
ctx: &mut impl SubsystemContext<Message = NetworkBridgeMessage, AllMessages = AllMessages>,
#[overseer::contextbounds(NetworkBridge, prefix = self::overseer)]
fn update_our_view<Net, Context>(
net: &mut Net,
ctx: &mut Context,
live_heads: &[ActivatedLeaf],
shared: &Shared,
finalized_number: BlockNumber,
metrics: &Metrics,
) {
) where
Net: Network,
{
let new_view = construct_view(live_heads.iter().map(|v| v.hash), finalized_number);

let (validation_peers, collation_peers) = {
@@ -1238,54 +1042,79 @@ fn send_collation_message_v1(

async fn dispatch_validation_event_to_all(
event: NetworkBridgeEvent<net_protocol::VersionedValidationProtocol>,
ctx: &mut impl SubsystemSender,
ctx: &mut impl overseer::NetworkBridgeSenderTrait,
) {
dispatch_validation_events_to_all(std::iter::once(event), ctx).await
}

async fn dispatch_collation_event_to_all(
event: NetworkBridgeEvent<net_protocol::VersionedCollationProtocol>,
ctx: &mut impl SubsystemSender,
ctx: &mut impl overseer::NetworkBridgeSenderTrait,
) {
dispatch_collation_events_to_all(std::iter::once(event), ctx).await
}

fn dispatch_validation_event_to_all_unbounded(
event: NetworkBridgeEvent<net_protocol::VersionedValidationProtocol>,
ctx: &mut impl SubsystemSender,
sender: &mut impl overseer::NetworkBridgeSenderTrait,
) {
for msg in AllMessages::dispatch_iter(event) {
ctx.send_unbounded_message(msg);
}
event
.focus()
.ok()
.map(StatementDistributionMessage::from)
.and_then(|msg| Some(sender.send_unbounded_message(msg)));
event
.focus()
.ok()
.map(BitfieldDistributionMessage::from)
.and_then(|msg| Some(sender.send_unbounded_message(msg)));
event
.focus()
.ok()
.map(ApprovalDistributionMessage::from)
.and_then(|msg| Some(sender.send_unbounded_message(msg)));
event
.focus()
.ok()
.map(GossipSupportMessage::from)
.and_then(|msg| Some(sender.send_unbounded_message(msg)));
}

fn dispatch_collation_event_to_all_unbounded(
event: NetworkBridgeEvent<net_protocol::VersionedCollationProtocol>,
ctx: &mut impl SubsystemSender,
sender: &mut impl overseer::NetworkBridgeSenderTrait,
) {
if let Some(msg) = event.focus().ok().map(CollatorProtocolMessage::NetworkBridgeUpdate) {
ctx.send_unbounded_message(msg.into());
if let Ok(msg) = event.focus() {
sender.send_unbounded_message(CollatorProtocolMessage::NetworkBridgeUpdate(msg))
}
}

async fn dispatch_validation_events_to_all<I>(events: I, ctx: &mut impl SubsystemSender)
where
async fn dispatch_validation_events_to_all<I>(
events: I,
sender: &mut impl overseer::NetworkBridgeSenderTrait,
) where
I: IntoIterator<Item = NetworkBridgeEvent<net_protocol::VersionedValidationProtocol>>,
I::IntoIter: Send,
{
ctx.send_messages(events.into_iter().flat_map(AllMessages::dispatch_iter)).await
for event in events {
sender
.send_messages(event.focus().map(StatementDistributionMessage::from))
.await;
sender.send_messages(event.focus().map(BitfieldDistributionMessage::from)).await;
sender.send_messages(event.focus().map(ApprovalDistributionMessage::from)).await;
sender.send_messages(event.focus().map(GossipSupportMessage::from)).await;
}
}

async fn dispatch_collation_events_to_all<I>(events: I, ctx: &mut impl SubsystemSender)
where
async fn dispatch_collation_events_to_all<I>(
events: I,
ctx: &mut impl overseer::NetworkBridgeSenderTrait,
) where
I: IntoIterator<Item = NetworkBridgeEvent<net_protocol::VersionedCollationProtocol>>,
I::IntoIter: Send,
{
let messages_for = |event: NetworkBridgeEvent<net_protocol::VersionedCollationProtocol>| {
event
.focus()
.ok()
.map(|m| AllMessages::CollatorProtocol(CollatorProtocolMessage::NetworkBridgeUpdate(m)))
event.focus().ok().map(|m| CollatorProtocolMessage::NetworkBridgeUpdate(m))
};

ctx.send_messages(events.into_iter().flat_map(messages_for)).await

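The dispatch hunks above replace the monolithic `AllMessages::dispatch_iter` with a per-subsystem `focus()` fan-out: a shared network event is narrowed into each interested subsystem's own message type and sent separately. A toy, dependency-free sketch of that shape (types illustrative):

// Sketch of the `focus()` fan-out: one shared event, narrowed per subsystem.
#[derive(Clone)]
struct NetworkEvent(&'static str);

struct StatementDistributionMessage(NetworkEvent);
struct BitfieldDistributionMessage(NetworkEvent);

impl NetworkEvent {
    // focus(): Ok when the event is relevant to the requested message type.
    fn focus<M: From<NetworkEvent>>(&self) -> Result<M, ()> {
        Ok(M::from(self.clone()))
    }
}

impl From<NetworkEvent> for StatementDistributionMessage {
    fn from(e: NetworkEvent) -> Self {
        Self(e)
    }
}
impl From<NetworkEvent> for BitfieldDistributionMessage {
    fn from(e: NetworkEvent) -> Self {
        Self(e)
    }
}

fn dispatch_to_all(events: Vec<NetworkEvent>) {
    for event in events {
        if let Ok(msg) = event.focus::<StatementDistributionMessage>() {
            println!("statement-distribution gets {}", msg.0 .0);
        }
        if let Ok(msg) = event.focus::<BitfieldDistributionMessage>() {
            println!("bitfield-distribution gets {}", msg.0 .0);
        }
    }
}

fn main() {
    dispatch_to_all(vec![NetworkEvent("PeerConnected")]);
}
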
@@ -0,0 +1,223 @@
// Copyright 2022 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

use super::{PeerSet, ProtocolVersion};
use polkadot_node_subsystem_util::metrics::{self, prometheus};

/// Metrics for the network bridge.
#[derive(Clone, Default)]
pub struct Metrics(pub(crate) Option<MetricsInner>);

fn peer_set_label(peer_set: PeerSet, version: ProtocolVersion) -> &'static str {
// Higher level code is meant to protect against this ever happening.
peer_set.get_protocol_name_static(version).unwrap_or("<internal error>")
}

impl Metrics {
pub fn on_peer_connected(&self, peer_set: PeerSet, version: ProtocolVersion) {
self.0.as_ref().map(|metrics| {
metrics
.connected_events
.with_label_values(&[peer_set_label(peer_set, version)])
.inc()
});
}

pub fn on_peer_disconnected(&self, peer_set: PeerSet, version: ProtocolVersion) {
self.0.as_ref().map(|metrics| {
metrics
.disconnected_events
.with_label_values(&[peer_set_label(peer_set, version)])
.inc()
});
}

pub fn note_peer_count(&self, peer_set: PeerSet, version: ProtocolVersion, count: usize) {
self.0.as_ref().map(|metrics| {
metrics
.peer_count
.with_label_values(&[peer_set_label(peer_set, version)])
.set(count as u64)
});
}

pub fn on_notification_received(
&self,
peer_set: PeerSet,
version: ProtocolVersion,
size: usize,
) {
if let Some(metrics) = self.0.as_ref() {
metrics
.notifications_received
.with_label_values(&[peer_set_label(peer_set, version)])
.inc();

metrics
.bytes_received
.with_label_values(&[peer_set_label(peer_set, version)])
.inc_by(size as u64);
}
}

pub fn on_notification_sent(
&self,
peer_set: PeerSet,
version: ProtocolVersion,
size: usize,
to_peers: usize,
) {
if let Some(metrics) = self.0.as_ref() {
metrics
.notifications_sent
.with_label_values(&[peer_set_label(peer_set, version)])
.inc_by(to_peers as u64);

metrics
.bytes_sent
.with_label_values(&[peer_set_label(peer_set, version)])
.inc_by((size * to_peers) as u64);
}
}

pub fn note_desired_peer_count(&self, peer_set: PeerSet, size: usize) {
self.0.as_ref().map(|metrics| {
metrics
.desired_peer_count
.with_label_values(&[peer_set.get_default_protocol_name()])
.set(size as u64)
});
}

pub fn on_report_event(&self) {
if let Some(metrics) = self.0.as_ref() {
metrics.report_events.inc()
}
}
}

#[derive(Clone)]
pub(crate) struct MetricsInner {
peer_count: prometheus::GaugeVec<prometheus::U64>,
connected_events: prometheus::CounterVec<prometheus::U64>,
disconnected_events: prometheus::CounterVec<prometheus::U64>,
desired_peer_count: prometheus::GaugeVec<prometheus::U64>,
report_events: prometheus::Counter<prometheus::U64>,

notifications_received: prometheus::CounterVec<prometheus::U64>,
notifications_sent: prometheus::CounterVec<prometheus::U64>,

bytes_received: prometheus::CounterVec<prometheus::U64>,
bytes_sent: prometheus::CounterVec<prometheus::U64>,
}

impl metrics::Metrics for Metrics {
fn try_register(
registry: &prometheus::Registry,
) -> std::result::Result<Self, prometheus::PrometheusError> {
let metrics = MetricsInner {
peer_count: prometheus::register(
prometheus::GaugeVec::new(
prometheus::Opts::new(
"polkadot_parachain_peer_count",
"The number of peers on a parachain-related peer-set",
),
&["protocol"]
)?,
registry,
)?,
connected_events: prometheus::register(
prometheus::CounterVec::new(
prometheus::Opts::new(
"polkadot_parachain_peer_connect_events_total",
"The number of peer connect events on a parachain notifications protocol",
),
&["protocol"]
)?,
registry,
)?,
disconnected_events: prometheus::register(
prometheus::CounterVec::new(
prometheus::Opts::new(
"polkadot_parachain_peer_disconnect_events_total",
"The number of peer disconnect events on a parachain notifications protocol",
),
&["protocol"]
)?,
registry,
)?,
desired_peer_count: prometheus::register(
prometheus::GaugeVec::new(
prometheus::Opts::new(
"polkadot_parachain_desired_peer_count",
"The number of peers that the local node is expected to connect to on a parachain-related peer-set (either including or not including unresolvable authorities, depending on whether `ConnectToValidators` or `ConnectToValidatorsResolved` was used.)",
),
&["protocol"]
)?,
registry,
)?,
report_events: prometheus::register(
prometheus::Counter::new(
"polkadot_parachain_network_report_events_total",
"The amount of reputation changes issued by subsystems",
)?,
registry,
)?,
notifications_received: prometheus::register(
prometheus::CounterVec::new(
prometheus::Opts::new(
"polkadot_parachain_notifications_received_total",
"The number of notifications received on a parachain protocol",
),
&["protocol"]
)?,
registry,
)?,
notifications_sent: prometheus::register(
prometheus::CounterVec::new(
prometheus::Opts::new(
"polkadot_parachain_notifications_sent_total",
"The number of notifications sent on a parachain protocol",
),
&["protocol"]
)?,
registry,
)?,
bytes_received: prometheus::register(
prometheus::CounterVec::new(
prometheus::Opts::new(
"polkadot_parachain_notification_bytes_received_total",
"The number of bytes received on a parachain notification protocol",
),
&["protocol"]
)?,
registry,
)?,
bytes_sent: prometheus::register(
prometheus::CounterVec::new(
prometheus::Opts::new(
"polkadot_parachain_notification_bytes_sent_total",
"The number of bytes sent on a parachain notification protocol",
),
&["protocol"]
)?,
registry,
)?,
};

Ok(Metrics(Some(metrics)))
}
}
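
The `try_register` pattern above builds each collector from `Opts` plus a label set, registers it, and only then exposes it. A standalone sketch of the same steps against the plain `prometheus` crate (the `polkadot_node_subsystem_util::metrics` helper wraps the equivalent new-then-register calls):

// Standalone sketch using the `prometheus` crate directly.
use prometheus::{CounterVec, Opts, Registry};

fn main() -> Result<(), prometheus::Error> {
    let registry = Registry::new();

    // Build the collector with a name, help text and one "protocol" label...
    let connect_events = CounterVec::new(
        Opts::new(
            "polkadot_parachain_peer_connect_events_total",
            "The number of peer connect events on a parachain notifications protocol",
        ),
        &["protocol"],
    )?;

    // ...register it, then bump a labelled series.
    registry.register(Box::new(connect_events.clone()))?;
    connect_events.with_label_values(&["validation/1"]).inc();
    Ok(())
}
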
@@ -34,8 +34,8 @@ use polkadot_node_network_protocol::{
use polkadot_node_subsystem::{
jaeger,
messages::{
ApprovalDistributionMessage, BitfieldDistributionMessage, GossipSupportMessage,
StatementDistributionMessage,
AllMessages, ApprovalDistributionMessage, BitfieldDistributionMessage,
GossipSupportMessage, StatementDistributionMessage,
},
ActiveLeavesUpdate, FromOverseer, LeafStatus, OverseerSignal,
};
@@ -313,8 +313,9 @@ async fn assert_sends_validation_event_to_all(
event: NetworkBridgeEvent<net_protocol::VersionedValidationProtocol>,
virtual_overseer: &mut TestSubsystemContextHandle<NetworkBridgeMessage>,
) {
// Ordering must match the enum variant order
// in `AllMessages`.
// Ordering must be consistent across:
// `fn dispatch_validation_event_to_all_unbounded`
// `dispatch_validation_events_to_all`
assert_matches!(
virtual_overseer.recv().await,
AllMessages::StatementDistribution(
@@ -1190,54 +1191,6 @@ fn send_messages_to_peers() {
});
}

#[test]
fn spread_event_to_subsystems_is_up_to_date() {
// Number of subsystems expected to be interested in a network event,
// and hence the network event broadcasted to.
const EXPECTED_COUNT: usize = 4;

let mut cnt = 0_usize;
for msg in AllMessages::dispatch_iter(NetworkBridgeEvent::PeerDisconnected(PeerId::random())) {
match msg {
AllMessages::Empty => unreachable!("Nobody cares about the dummy"),
AllMessages::CandidateValidation(_) => unreachable!("Not interested in network events"),
AllMessages::CandidateBacking(_) => unreachable!("Not interested in network events"),
AllMessages::ChainApi(_) => unreachable!("Not interested in network events"),
AllMessages::CollatorProtocol(_) => unreachable!("Not interested in network events"),
AllMessages::StatementDistribution(_) => {
cnt += 1;
},
AllMessages::AvailabilityDistribution(_) =>
unreachable!("Not interested in network events"),
AllMessages::AvailabilityRecovery(_) =>
unreachable!("Not interested in network events"),
AllMessages::BitfieldDistribution(_) => {
cnt += 1;
},
AllMessages::BitfieldSigning(_) => unreachable!("Not interested in network events"),
AllMessages::Provisioner(_) => unreachable!("Not interested in network events"),
AllMessages::RuntimeApi(_) => unreachable!("Not interested in network events"),
AllMessages::AvailabilityStore(_) => unreachable!("Not interested in network events"),
AllMessages::NetworkBridge(_) => unreachable!("Not interested in network events"),
AllMessages::CollationGeneration(_) => unreachable!("Not interested in network events"),
AllMessages::ApprovalVoting(_) => unreachable!("Not interested in network events"),
AllMessages::ApprovalDistribution(_) => {
cnt += 1;
},
AllMessages::GossipSupport(_) => {
cnt += 1;
},
AllMessages::DisputeCoordinator(_) => unreachable!("Not interested in network events"),
AllMessages::DisputeDistribution(_) => unreachable!("Not interested in network events"),
AllMessages::ChainSelection(_) => unreachable!("Not interested in network events"),
AllMessages::PvfChecker(_) => unreachable!("Not interested in network events"),
// Add variants here as needed, `{ cnt += 1; }` for those that need to be
// notified, `unreachable!()` for those that should not.
}
}
assert_eq!(cnt, EXPECTED_COUNT);
}

#[test]
fn our_view_updates_decreasing_order_and_limited_to_max() {
test_harness(done_syncing_oracle(), |test_harness| async move {

@@ -38,8 +38,10 @@ use polkadot_node_network_protocol::{
use polkadot_node_primitives::{CollationSecondedSignal, PoV, Statement};
use polkadot_node_subsystem::{
jaeger,
messages::{CollatorProtocolMessage, NetworkBridgeEvent, NetworkBridgeMessage},
overseer, FromOverseer, OverseerSignal, PerLeafSpan, SubsystemContext,
messages::{
CollatorProtocolMessage, NetworkBridgeEvent, NetworkBridgeMessage, RuntimeApiMessage,
},
overseer, FromOverseer, OverseerSignal, PerLeafSpan,
};
use polkadot_node_subsystem_util::{
metrics::{self, prometheus},
@@ -360,6 +362,7 @@ impl State {
/// or the relay-parent isn't in the active-leaves set, we ignore the message
/// as it must be invalid in that case - although this indicates a logic error
/// elsewhere in the node.
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
async fn distribute_collation<Context>(
ctx: &mut Context,
runtime: &mut RuntimeInfo,
@@ -368,11 +371,7 @@ async fn distribute_collation<Context>(
receipt: CandidateReceipt,
pov: PoV,
result_sender: Option<oneshot::Sender<CollationSecondedSignal>>,
) -> Result<()>
where
Context: SubsystemContext<Message = CollatorProtocolMessage>,
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
{
) -> Result<()> {
let relay_parent = receipt.descriptor.relay_parent;

// This collation is not in the active-leaves set.
@@ -398,7 +397,7 @@ where

// Determine which core the para collated-on is assigned to.
// If it is not scheduled then ignore the message.
let (our_core, num_cores) = match determine_core(ctx, id, relay_parent).await? {
let (our_core, num_cores) = match determine_core(ctx.sender(), id, relay_parent).await? {
Some(core) => core,
None => {
gum::warn!(
@@ -461,16 +460,12 @@ where

/// Get the Id of the Core that is assigned to the para being collated on if any
/// and the total number of cores.
async fn determine_core<Context>(
ctx: &mut Context,
async fn determine_core(
sender: &mut impl overseer::SubsystemSender<RuntimeApiMessage>,
para_id: ParaId,
relay_parent: Hash,
) -> Result<Option<(CoreIndex, usize)>>
where
Context: SubsystemContext<Message = CollatorProtocolMessage>,
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
{
let cores = get_availability_cores(ctx, relay_parent).await?;
) -> Result<Option<(CoreIndex, usize)>> {
let cores = get_availability_cores(sender, relay_parent).await?;

for (idx, core) in cores.iter().enumerate() {
if let CoreState::Scheduled(occupied) = core {
@@ -493,17 +488,14 @@ struct GroupValidators {
/// Figure out current group of validators assigned to the para being collated on.
///
/// Returns [`ValidatorId`]'s of current group as determined based on the `relay_parent`.
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
async fn determine_our_validators<Context>(
ctx: &mut Context,
runtime: &mut RuntimeInfo,
core_index: CoreIndex,
cores: usize,
relay_parent: Hash,
) -> Result<GroupValidators>
where
Context: SubsystemContext<Message = CollatorProtocolMessage>,
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
{
) -> Result<GroupValidators> {
let session_index = runtime.get_session_index_for_child(ctx.sender(), relay_parent).await?;
let info = &runtime
.get_session_info_by_index(ctx.sender(), relay_parent, session_index)
@@ -511,7 +503,7 @@ where
.session_info;
gum::debug!(target: LOG_TARGET, ?session_index, "Received session info");
let groups = &info.validator_groups;
let rotation_info = get_group_rotation_info(ctx, relay_parent).await?;
let rotation_info = get_group_rotation_info(ctx.sender(), relay_parent).await?;

let current_group_index = rotation_info.group_for_core(core_index, cores);
let current_validators = groups
@@ -530,11 +522,8 @@ where
}

/// Issue a `Declare` collation message to the given `peer`.
async fn declare<Context>(ctx: &mut Context, state: &mut State, peer: PeerId)
where
Context: SubsystemContext<Message = CollatorProtocolMessage>,
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
{
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
async fn declare<Context>(ctx: &mut Context, state: &mut State, peer: PeerId) {
let declare_signature_payload = protocol_v1::declare_signature_payload(&state.local_peer_id);

if let Some(para_id) = state.collating_on {
@@ -554,11 +543,11 @@ where

/// Issue a connection request to a set of validators and
/// revoke the previous connection request.
async fn connect_to_validators<Context>(ctx: &mut Context, validator_ids: Vec<AuthorityDiscoveryId>)
where
Context: SubsystemContext<Message = CollatorProtocolMessage>,
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
{
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
async fn connect_to_validators<Context>(
ctx: &mut Context,
validator_ids: Vec<AuthorityDiscoveryId>,
) {
// ignore address resolution failure
// will reissue a new request on new collation
let (failed, _) = oneshot::channel();
@@ -574,15 +563,13 @@ where
///
/// This will only advertise a collation if there exists one for the given `relay_parent` and the given `peer` is
/// set as validator for our para at the given `relay_parent`.
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
async fn advertise_collation<Context>(
ctx: &mut Context,
state: &mut State,
relay_parent: Hash,
peer: PeerId,
) where
Context: SubsystemContext<Message = CollatorProtocolMessage>,
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
{
) {
let should_advertise = state
.our_validators_groups
.get(&relay_parent)
@@ -635,16 +622,13 @@ async fn advertise_collation<Context>(
}

/// The main incoming message dispatching switch.
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
async fn process_msg<Context>(
ctx: &mut Context,
runtime: &mut RuntimeInfo,
state: &mut State,
msg: CollatorProtocolMessage,
) -> Result<()>
where
Context: SubsystemContext<Message = CollatorProtocolMessage>,
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
{
) -> Result<()> {
use CollatorProtocolMessage::*;

match msg {
@@ -748,17 +732,14 @@ async fn send_collation(
}

/// A networking messages switch.
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
async fn handle_incoming_peer_message<Context>(
ctx: &mut Context,
runtime: &mut RuntimeInfo,
state: &mut State,
origin: PeerId,
msg: protocol_v1::CollatorProtocolMessage,
) -> Result<()>
where
Context: SubsystemContext<Message = CollatorProtocolMessage>,
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
{
) -> Result<()> {
use protocol_v1::CollatorProtocolMessage::*;

match msg {
@@ -831,15 +812,12 @@ where
}

/// Process an incoming network request for a collation.
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
async fn handle_incoming_request<Context>(
ctx: &mut Context,
state: &mut State,
req: IncomingRequest<request_v1::CollationFetchingRequest>,
) -> Result<()>
where
Context: SubsystemContext<Message = CollatorProtocolMessage>,
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
{
) -> Result<()> {
let _span = state
.span_per_relay_parent
.get(&req.payload.relay_parent)
@@ -907,15 +885,13 @@ where
}

/// Our view has changed.
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
async fn handle_peer_view_change<Context>(
ctx: &mut Context,
state: &mut State,
peer_id: PeerId,
view: View,
) where
Context: SubsystemContext<Message = CollatorProtocolMessage>,
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
{
) {
let current = state.peer_views.entry(peer_id.clone()).or_default();

let added: Vec<Hash> = view.difference(&*current).cloned().collect();
@@ -928,16 +904,13 @@ async fn handle_peer_view_change<Context>(
}

/// Bridge messages switch.
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
async fn handle_network_msg<Context>(
ctx: &mut Context,
runtime: &mut RuntimeInfo,
state: &mut State,
bridge_message: NetworkBridgeEvent<net_protocol::CollatorProtocolMessage>,
) -> Result<()>
where
Context: SubsystemContext<Message = CollatorProtocolMessage>,
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
{
) -> Result<()> {
use NetworkBridgeEvent::*;

match bridge_message {
@@ -1021,17 +994,14 @@ async fn handle_our_view_change(state: &mut State, view: OurView) -> Result<()>
}

/// The collator protocol collator side main loop.
#[overseer::contextbounds(CollatorProtocol, prefix = crate::overseer)]
pub(crate) async fn run<Context>(
mut ctx: Context,
local_peer_id: PeerId,
collator_pair: CollatorPair,
mut req_receiver: IncomingRequestReceiver<request_v1::CollationFetchingRequest>,
metrics: Metrics,
) -> std::result::Result<(), FatalError>
where
Context: SubsystemContext<Message = CollatorProtocolMessage>,
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
{
) -> std::result::Result<(), FatalError> {
use OverseerSignal::*;

let mut state = State::new(local_peer_id, collator_pair, metrics);

@@ -34,9 +34,7 @@ use polkadot_node_network_protocol::{
use polkadot_primitives::v2::CollatorPair;

use polkadot_node_subsystem::{
errors::SubsystemError,
messages::{CollatorProtocolMessage, NetworkBridgeMessage},
overseer, SpawnedSubsystem, SubsystemContext, SubsystemSender,
errors::SubsystemError, messages::NetworkBridgeMessage, overseer, SpawnedSubsystem,
};

mod error;
@@ -89,6 +87,7 @@ pub struct CollatorProtocolSubsystem {
protocol_side: ProtocolSide,
}

#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
impl CollatorProtocolSubsystem {
/// Start the collator protocol.
/// If `id` is `Some` this is a collator side of the protocol.
@@ -98,11 +97,7 @@ impl CollatorProtocolSubsystem {
Self { protocol_side }
}

async fn run<Context>(self, ctx: Context) -> std::result::Result<(), error::FatalError>
where
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
Context: SubsystemContext<Message = CollatorProtocolMessage>,
{
async fn run<Context>(self, ctx: Context) -> std::result::Result<(), error::FatalError> {
match self.protocol_side {
ProtocolSide::Validator { keystore, eviction_policy, metrics } =>
validator_side::run(ctx, keystore, eviction_policy, metrics).await,
@@ -112,12 +107,8 @@ impl CollatorProtocolSubsystem {
}
}

impl<Context> overseer::Subsystem<Context, SubsystemError> for CollatorProtocolSubsystem
where
Context: SubsystemContext<Message = CollatorProtocolMessage>,
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
<Context as SubsystemContext>::Sender: SubsystemSender,
{
#[overseer::subsystem(CollatorProtocol, error=SubsystemError, prefix=self::overseer)]
impl<Context> CollatorProtocolSubsystem {
fn start(self, ctx: Context) -> SpawnedSubsystem {
let future = self
.run(ctx)
@@ -129,10 +120,11 @@ where
}

/// Modify the reputation of a peer based on its behavior.
async fn modify_reputation<Context>(ctx: &mut Context, peer: PeerId, rep: Rep)
where
Context: SubsystemContext,
{
async fn modify_reputation(
sender: &mut impl overseer::CollatorProtocolSenderTrait,
peer: PeerId,
rep: Rep,
) {
gum::trace!(
target: LOG_TARGET,
rep = ?rep,
@@ -140,5 +132,5 @@ where
"reputation change for peer",
);

ctx.send_message(NetworkBridgeMessage::ReportPeer(peer, rep)).await;
sender.send_message(NetworkBridgeMessage::ReportPeer(peer, rep)).await;
}

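`overseer::CollatorProtocolSenderTrait` above (like the other generated `*SenderTrait` names) is presumably a bundle of one `SubsystemSender<M>` bound per outgoing message the subsystem declared, with a blanket impl. A toy, dependency-free sketch of that shape (names illustrative):

// Toy sketch: a generated `*SenderTrait` bundles one sender bound per
// declared outgoing message; sending an undeclared type fails to compile.
trait SubsystemSender<M> {
    fn send_message(&mut self, msg: M);
}

struct NetworkBridgeMessage;
struct CandidateBackingMessage;

trait CollatorProtocolSenderTrait:
    SubsystemSender<NetworkBridgeMessage> + SubsystemSender<CandidateBackingMessage>
{
}
impl<T> CollatorProtocolSenderTrait for T where
    T: SubsystemSender<NetworkBridgeMessage> + SubsystemSender<CandidateBackingMessage>
{
}

struct Sender;
impl SubsystemSender<NetworkBridgeMessage> for Sender {
    fn send_message(&mut self, _msg: NetworkBridgeMessage) {}
}
impl SubsystemSender<CandidateBackingMessage> for Sender {
    fn send_message(&mut self, _msg: CandidateBackingMessage) {}
}

// A helper bounded by the alias may send exactly the declared messages.
fn report_peer(sender: &mut impl CollatorProtocolSenderTrait) {
    sender.send_message(NetworkBridgeMessage);
}

fn main() {
    report_peer(&mut Sender);
}
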
@@ -48,9 +48,9 @@ use polkadot_node_subsystem::{
jaeger,
messages::{
CandidateBackingMessage, CollatorProtocolMessage, IfDisconnected, NetworkBridgeEvent,
NetworkBridgeMessage,
NetworkBridgeMessage, RuntimeApiMessage,
},
overseer, FromOverseer, OverseerSignal, PerLeafSpan, SubsystemContext, SubsystemSender,
overseer, FromOverseer, OverseerSignal, PerLeafSpan, SubsystemSender,
};
use polkadot_node_subsystem_util::metrics::{self, prometheus};
use polkadot_primitives::v2::{CandidateReceipt, CollatorId, Hash, Id as ParaId};
@@ -362,7 +362,7 @@ struct ActiveParas {
impl ActiveParas {
async fn assign_incoming(
&mut self,
sender: &mut impl SubsystemSender,
sender: &mut impl SubsystemSender<RuntimeApiMessage>,
keystore: &SyncCryptoStorePtr,
new_relay_parents: impl IntoIterator<Item = Hash>,
) {
@@ -630,25 +630,19 @@ fn collator_peer_id(
})
}

async fn disconnect_peer<Context>(ctx: &mut Context, peer_id: PeerId)
where
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
Context: SubsystemContext<Message = CollatorProtocolMessage>,
{
ctx.send_message(NetworkBridgeMessage::DisconnectPeer(peer_id, PeerSet::Collation))
async fn disconnect_peer(sender: &mut impl overseer::CollatorProtocolSenderTrait, peer_id: PeerId) {
sender
.send_message(NetworkBridgeMessage::DisconnectPeer(peer_id, PeerSet::Collation))
.await
}

/// Another subsystem has requested to fetch collations on a particular leaf for some para.
async fn fetch_collation<Context>(
ctx: &mut Context,
async fn fetch_collation(
sender: &mut impl overseer::CollatorProtocolSenderTrait,
state: &mut State,
pc: PendingCollation,
id: CollatorId,
) where
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
Context: SubsystemContext<Message = CollatorProtocolMessage>,
{
) {
let (tx, rx) = oneshot::channel();

let PendingCollation { relay_parent, para_id, peer_id, .. } = pc;
@@ -663,7 +657,7 @@ async fn fetch_collation<Context>(

if let Some(peer_data) = state.peer_data.get(&peer_id) {
if peer_data.has_advertised(&relay_parent) {
request_collation(ctx, state, relay_parent, para_id, peer_id, tx).await;
request_collation(sender, state, relay_parent, para_id, peer_id, tx).await;
} else {
gum::debug!(
target: LOG_TARGET,
@@ -687,51 +681,44 @@ async fn fetch_collation<Context>(
}

/// Report a collator for some malicious actions.
async fn report_collator<Context>(
ctx: &mut Context,
async fn report_collator(
sender: &mut impl overseer::CollatorProtocolSenderTrait,
peer_data: &HashMap<PeerId, PeerData>,
id: CollatorId,
) where
Context: SubsystemContext<Message = CollatorProtocolMessage>,
{
) {
if let Some(peer_id) = collator_peer_id(peer_data, &id) {
modify_reputation(ctx, peer_id, COST_REPORT_BAD).await;
modify_reputation(sender, peer_id, COST_REPORT_BAD).await;
}
}

/// Some other subsystem has reported a collator as a good one, bump reputation.
async fn note_good_collation<Context>(
ctx: &mut Context,
async fn note_good_collation(
sender: &mut impl overseer::CollatorProtocolSenderTrait,
peer_data: &HashMap<PeerId, PeerData>,
id: CollatorId,
) where
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
Context: SubsystemContext<Message = CollatorProtocolMessage>,
{
) {
if let Some(peer_id) = collator_peer_id(peer_data, &id) {
modify_reputation(ctx, peer_id, BENEFIT_NOTIFY_GOOD).await;
modify_reputation(sender, peer_id, BENEFIT_NOTIFY_GOOD).await;
}
}

/// Notify a collator that its collation got seconded.
async fn notify_collation_seconded<Context>(
ctx: &mut Context,
async fn notify_collation_seconded(
sender: &mut impl overseer::CollatorProtocolSenderTrait,
peer_id: PeerId,
relay_parent: Hash,
statement: SignedFullStatement,
) where
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
Context: SubsystemContext<Message = CollatorProtocolMessage>,
{
) {
let wire_message =
protocol_v1::CollatorProtocolMessage::CollationSeconded(relay_parent, statement.into());
ctx.send_message(NetworkBridgeMessage::SendCollationMessage(
vec![peer_id],
Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message)),
))
.await;
sender
.send_message(NetworkBridgeMessage::SendCollationMessage(
vec![peer_id],
Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message)),
))
.await;

modify_reputation(ctx, peer_id, BENEFIT_NOTIFY_GOOD).await;
modify_reputation(sender, peer_id, BENEFIT_NOTIFY_GOOD).await;
}

/// A peer's view has changed. A number of things should be done:
@@ -754,17 +741,14 @@ async fn handle_peer_view_change(state: &mut State, peer_id: PeerId, view: View)
/// - Check if the requested collation is in our view.
/// - Update `PerRequest` records with the `result` field if necessary.
/// And as such invocations of this function may rely on that.
async fn request_collation<Context>(
ctx: &mut Context,
async fn request_collation(
sender: &mut impl overseer::CollatorProtocolSenderTrait,
state: &mut State,
relay_parent: Hash,
para_id: ParaId,
peer_id: PeerId,
result: oneshot::Sender<(CandidateReceipt, PoV)>,
) where
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
Context: SubsystemContext<Message = CollatorProtocolMessage>,
{
) {
if !state.view.contains(&relay_parent) {
gum::debug!(
target: LOG_TARGET,
@@ -815,29 +799,28 @@ async fn request_collation<Context>(
"Requesting collation",
);

ctx.send_message(NetworkBridgeMessage::SendRequests(
vec![requests],
IfDisconnected::ImmediateError,
))
.await;
sender
.send_message(NetworkBridgeMessage::SendRequests(
vec![requests],
IfDisconnected::ImmediateError,
))
.await;
}

/// Networking message has been received.
#[overseer::contextbounds(CollatorProtocol, prefix = overseer)]
async fn process_incoming_peer_message<Context>(
ctx: &mut Context,
state: &mut State,
origin: PeerId,
msg: protocol_v1::CollatorProtocolMessage,
) where
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
Context: SubsystemContext<Message = CollatorProtocolMessage>,
{
) {
use protocol_v1::CollatorProtocolMessage::*;
use sp_runtime::traits::AppVerify;
match msg {
Declare(collator_id, para_id, signature) => {
if collator_peer_id(&state.peer_data, &collator_id).is_some() {
modify_reputation(ctx, origin, COST_UNEXPECTED_MESSAGE).await;
modify_reputation(ctx.sender(), origin, COST_UNEXPECTED_MESSAGE).await;
return
}

@@ -850,7 +833,7 @@ async fn process_incoming_peer_message<Context>(
?para_id,
"Unknown peer",
);
modify_reputation(ctx, origin, COST_UNEXPECTED_MESSAGE).await;
modify_reputation(ctx.sender(), origin, COST_UNEXPECTED_MESSAGE).await;
return
},
};
@@ -862,7 +845,7 @@ async fn process_incoming_peer_message<Context>(
?para_id,
"Peer is not in the collating state",
);
modify_reputation(ctx, origin, COST_UNEXPECTED_MESSAGE).await;
modify_reputation(ctx.sender(), origin, COST_UNEXPECTED_MESSAGE).await;
return
}

@@ -873,7 +856,7 @@ async fn process_incoming_peer_message<Context>(
?para_id,
"Signature verification failure",
);
modify_reputation(ctx, origin, COST_INVALID_SIGNATURE).await;
modify_reputation(ctx.sender(), origin, COST_INVALID_SIGNATURE).await;
return
}

@@ -896,9 +879,9 @@ async fn process_incoming_peer_message<Context>(
"Declared as collator for unneeded para",
);

modify_reputation(ctx, origin.clone(), COST_UNNEEDED_COLLATOR).await;
modify_reputation(ctx.sender(), origin.clone(), COST_UNNEEDED_COLLATOR).await;
gum::trace!(target: LOG_TARGET, "Disconnecting unneeded collator");
disconnect_peer(ctx, origin).await;
disconnect_peer(ctx.sender(), origin).await;
}
},
AdvertiseCollation(relay_parent) => {
@@ -914,7 +897,7 @@ async fn process_incoming_peer_message<Context>(
"Advertise collation out of view",
);

modify_reputation(ctx, origin, COST_UNEXPECTED_MESSAGE).await;
modify_reputation(ctx.sender(), origin, COST_UNEXPECTED_MESSAGE).await;
return
}

@@ -926,7 +909,7 @@ async fn process_incoming_peer_message<Context>(
?relay_parent,
"Advertise collation message has been received from an unknown peer",
);
modify_reputation(ctx, origin, COST_UNEXPECTED_MESSAGE).await;
modify_reputation(ctx.sender(), origin, COST_UNEXPECTED_MESSAGE).await;
return
},
Some(p) => p,
@@ -962,7 +945,8 @@ async fn process_incoming_peer_message<Context>(
collations.status = CollationStatus::Fetching;
collations.waiting_collation = Some(id.clone());

fetch_collation(ctx, state, pending_collation.clone(), id).await;
fetch_collation(ctx.sender(), state, pending_collation.clone(), id)
.await;
},
CollationStatus::Seconded => {
gum::trace!(
@@ -984,7 +968,7 @@ async fn process_incoming_peer_message<Context>(
"Invalid advertisement",
);

modify_reputation(ctx, origin, COST_UNEXPECTED_MESSAGE).await;
modify_reputation(ctx.sender(), origin, COST_UNEXPECTED_MESSAGE).await;
},
}
},
@@ -1011,16 +995,13 @@ async fn remove_relay_parent(state: &mut State, relay_parent: Hash) -> Result<()
}

/// Our view has changed.
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
async fn handle_our_view_change<Context>(
ctx: &mut Context,
state: &mut State,
keystore: &SyncCryptoStorePtr,
view: OurView,
) -> Result<()>
where
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
Context: SubsystemContext<Message = CollatorProtocolMessage>,
{
) -> Result<()> {
let old_view = std::mem::replace(&mut state.view, view);

let added: HashMap<Hash, Arc<jaeger::Span>> = state
@@ -1061,7 +1042,7 @@ where
?para_id,
"Disconnecting peer on view change (not current parachain id)"
);
disconnect_peer(ctx, peer_id.clone()).await;
disconnect_peer(ctx.sender(), peer_id.clone()).await;
}
}
}
@@ -1070,16 +1051,13 @@ where
}

/// Bridge event switch.
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
async fn handle_network_msg<Context>(
ctx: &mut Context,
state: &mut State,
keystore: &SyncCryptoStorePtr,
bridge_message: NetworkBridgeEvent<net_protocol::CollatorProtocolMessage>,
) -> Result<()>
where
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
Context: SubsystemContext<Message = CollatorProtocolMessage>,
{
) -> Result<()> {
use NetworkBridgeEvent::*;

match bridge_message {
@@ -1109,15 +1087,13 @@ where
}

/// The main message receiver switch.
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
async fn process_msg<Context>(
ctx: &mut Context,
keystore: &SyncCryptoStorePtr,
msg: CollatorProtocolMessage,
state: &mut State,
) where
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
Context: SubsystemContext<Message = CollatorProtocolMessage>,
{
) {
use CollatorProtocolMessage::*;

let _timer = state.metrics.time_process_msg();
@@ -1137,7 +1113,7 @@ async fn process_msg<Context>(
);
},
ReportCollator(id) => {
report_collator(ctx, &state.peer_data, id).await;
report_collator(ctx.sender(), &state.peer_data, id).await;
},
NetworkBridgeUpdate(event) => {
if let Err(e) = handle_network_msg(ctx, state, keystore, event).await {
@@ -1152,8 +1128,8 @@ async fn process_msg<Context>(
if let Some(collation_event) = state.pending_candidates.remove(&parent) {
let (collator_id, pending_collation) = collation_event;
let PendingCollation { relay_parent, peer_id, .. } = pending_collation;
note_good_collation(ctx, &state.peer_data, collator_id).await;
notify_collation_seconded(ctx, peer_id, relay_parent, stmt).await;
note_good_collation(ctx.sender(), &state.peer_data, collator_id).await;
notify_collation_seconded(ctx.sender(), peer_id, relay_parent, stmt).await;

if let Some(collations) = state.collations_per_relay_parent.get_mut(&parent) {
collations.status = CollationStatus::Seconded;
@@ -1184,7 +1160,7 @@ async fn process_msg<Context>(
Entry::Vacant(_) => return,
};

report_collator(ctx, &state.peer_data, id.clone()).await;
report_collator(ctx.sender(), &state.peer_data, id.clone()).await;

dequeue_next_collation_and_fetch(ctx, state, parent, id).await;
},
@@ -1211,16 +1187,13 @@ fn infinite_stream(every: Duration) -> impl FusedStream<Item = ()> {
}

/// The main run loop.
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
pub(crate) async fn run<Context>(
mut ctx: Context,
keystore: SyncCryptoStorePtr,
eviction_policy: crate::CollatorEvictionPolicy,
metrics: Metrics,
) -> std::result::Result<(), crate::error::FatalError>
where
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
Context: SubsystemContext<Message = CollatorProtocolMessage>,
{
) -> std::result::Result<(), crate::error::FatalError> {
let mut state = State { metrics, ..Default::default() };

let next_inactivity_stream = infinite_stream(ACTIVITY_POLL);
@@ -1247,7 +1220,7 @@ where
}
}
_ = next_inactivity_stream.next() => {
disconnect_inactive_peers(&mut ctx, &eviction_policy, &state.peer_data).await;
disconnect_inactive_peers(ctx.sender(), &eviction_policy, &state.peer_data).await;
}
res = state.collation_fetches.select_next_some() => {
handle_collation_fetched_result(&mut ctx, &mut state, res).await;
@@ -1270,7 +1243,7 @@ where
).await;

for (peer_id, rep) in reputation_changes {
modify_reputation(&mut ctx, peer_id, rep).await;
modify_reputation(ctx.sender(), peer_id, rep).await;
}
},
}
@@ -1304,9 +1277,9 @@ async fn poll_requests(
}

/// Dequeue another collation and fetch.
async fn dequeue_next_collation_and_fetch(
ctx: &mut (impl SubsystemContext<Message = CollatorProtocolMessage>
+ overseer::SubsystemContext<Message = CollatorProtocolMessage>),
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
async fn dequeue_next_collation_and_fetch<Context>(
ctx: &mut Context,
state: &mut State,
relay_parent: Hash,
// The collator we tried to fetch from last.
@@ -1323,7 +1296,7 @@ async fn dequeue_next_collation_and_fetch(
?id,
"Successfully dequeued next advertisement - fetching ..."
);
fetch_collation(ctx, state, next, id).await;
fetch_collation(ctx.sender(), state, next, id).await;
} else {
gum::debug!(
target: LOG_TARGET,
@@ -1335,14 +1308,12 @@ async fn dequeue_next_collation_and_fetch(
}

/// Handle a fetched collation result.
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
async fn handle_collation_fetched_result<Context>(
ctx: &mut Context,
state: &mut State,
(mut collation_event, res): PendingCollationFetch,
) where
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
Context: SubsystemContext<Message = CollatorProtocolMessage>,
{
) {
// If no prior collation for this relay parent has been seconded, then
// memorize the `collation_event` for that `relay_parent`, such that we may
// notify the collator of their successful second backing
@@ -1380,12 +1351,13 @@ async fn handle_collation_fetched_result<Context>(

if let Entry::Vacant(entry) = state.pending_candidates.entry(relay_parent) {
collation_event.1.commitments_hash = Some(candidate_receipt.commitments_hash);
ctx.send_message(CandidateBackingMessage::Second(
relay_parent.clone(),
candidate_receipt,
pov,
))
.await;
ctx.sender()
.send_message(CandidateBackingMessage::Second(
relay_parent.clone(),
candidate_receipt,
pov,
))
.await;

entry.insert(collation_event);
} else {
@@ -1401,18 +1373,15 @@ async fn handle_collation_fetched_result<Context>(
// This issues `NetworkBridge` notifications to disconnect from all inactive peers at the
// earliest possible point. This does not yet clean up any metadata, as that will be done upon
// receipt of the `PeerDisconnected` event.
async fn disconnect_inactive_peers<Context>(
ctx: &mut Context,
async fn disconnect_inactive_peers(
sender: &mut impl overseer::CollatorProtocolSenderTrait,
eviction_policy: &crate::CollatorEvictionPolicy,
peers: &HashMap<PeerId, PeerData>,
) where
Context: overseer::SubsystemContext<Message = CollatorProtocolMessage>,
Context: SubsystemContext<Message = CollatorProtocolMessage>,
{
) {
for (peer, peer_data) in peers {
if peer_data.is_inactive(&eviction_policy) {
gum::trace!(target: LOG_TARGET, "Disconnecting inactive peer");
|
||||
disconnect_peer(ctx, peer.clone()).await;
|
||||
disconnect_peer(sender, peer.clone()).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -33,7 +33,7 @@ use polkadot_node_network_protocol::request_response::{incoming::IncomingRequest
use polkadot_node_primitives::DISPUTE_WINDOW;
use polkadot_node_subsystem::{
messages::DisputeDistributionMessage, overseer, FromOverseer, OverseerSignal, SpawnedSubsystem,
SubsystemContext, SubsystemError,
SubsystemError,
};
use polkadot_node_subsystem_util::{runtime, runtime::RuntimeInfo};

@@ -114,12 +114,11 @@ pub struct DisputeDistributionSubsystem<AD> {
metrics: Metrics,
}

impl<Context, AD> overseer::Subsystem<Context, SubsystemError> for DisputeDistributionSubsystem<AD>
#[overseer::subsystem(DisputeDistribution, error = SubsystemError, prefix = self::overseer)]
impl<Context, AD> DisputeDistributionSubsystem<AD>
where
Context: SubsystemContext<Message = DisputeDistributionMessage>
+ overseer::SubsystemContext<Message = DisputeDistributionMessage>
+ Sync
+ Send,
<Context as overseer::DisputeDistributionContextTrait>::Sender:
overseer::DisputeDistributionSenderTrait + Sync + Send,
AD: AuthorityDiscovery + Clone,
{
fn start(self, ctx: Context) -> SpawnedSubsystem {
@@ -132,6 +131,7 @@ where
}
}

#[overseer::contextbounds(DisputeDistribution, prefix = self::overseer)]
impl<AD> DisputeDistributionSubsystem<AD>
where
AD: AuthorityDiscovery + Clone,
@@ -160,13 +160,7 @@ where
}

/// Start processing work as passed on from the Overseer.
async fn run<Context>(mut self, mut ctx: Context) -> std::result::Result<(), FatalError>
where
Context: SubsystemContext<Message = DisputeDistributionMessage>
+ overseer::SubsystemContext<Message = DisputeDistributionMessage>
+ Sync
+ Send,
{
async fn run<Context>(mut self, mut ctx: Context) -> std::result::Result<(), FatalError> {
let receiver = DisputesReceiver::new(
ctx.sender().clone(),
self.req_receiver
@@ -205,7 +199,7 @@ where
}

/// Handle overseer signals.
async fn handle_signals<Context: SubsystemContext>(
async fn handle_signals<Context>(
&mut self,
ctx: &mut Context,
signal: OverseerSignal,
@@ -221,7 +215,7 @@ where
}

/// Handle `DisputeDistributionMessage`s.
async fn handle_subsystem_message<Context: SubsystemContext>(
async fn handle_subsystem_message<Context>(
&mut self,
ctx: &mut Context,
msg: DisputeDistributionMessage,
@@ -243,10 +237,10 @@ enum MuxedMessage {
Sender(Option<TaskFinish>),
}

#[overseer::contextbounds(DisputeDistribution, prefix = self::overseer)]
impl MuxedMessage {
async fn receive(
ctx: &mut (impl SubsystemContext<Message = DisputeDistributionMessage>
+ overseer::SubsystemContext<Message = DisputeDistributionMessage>),
async fn receive<Context>(
ctx: &mut Context,
from_sender: &mut mpsc::Receiver<TaskFinish>,
) -> Self {
// We are only fusing here to make `select` happy, in reality we will quit if the stream

@@ -40,8 +40,8 @@ use polkadot_node_network_protocol::{
};
use polkadot_node_primitives::DISPUTE_WINDOW;
use polkadot_node_subsystem::{
messages::{AllMessages, DisputeCoordinatorMessage, ImportStatementsResult},
SubsystemSender,
messages::{DisputeCoordinatorMessage, ImportStatementsResult},
overseer,
};
use polkadot_node_subsystem_util::{runtime, runtime::RuntimeInfo};

@@ -132,9 +132,10 @@ impl MuxedMessage {
}
}

impl<Sender: SubsystemSender, AD> DisputesReceiver<Sender, AD>
impl<Sender, AD> DisputesReceiver<Sender, AD>
where
AD: AuthorityDiscovery,
Sender: overseer::DisputeDistributionSenderTrait,
{
/// Create a new receiver which can be `run`.
pub fn new(
@@ -265,15 +266,13 @@ where
let (pending_confirmation, confirmation_rx) = oneshot::channel();
let candidate_hash = candidate_receipt.hash();
self.sender
.send_message(AllMessages::DisputeCoordinator(
DisputeCoordinatorMessage::ImportStatements {
candidate_hash,
candidate_receipt,
session: valid_vote.0.session_index(),
statements: vec![valid_vote, invalid_vote],
pending_confirmation: Some(pending_confirmation),
},
))
.send_message(DisputeCoordinatorMessage::ImportStatements {
candidate_hash,
candidate_receipt,
session: valid_vote.0.session_index(),
statements: vec![valid_vote, invalid_vote],
pending_confirmation: Some(pending_confirmation),
})
.await;

self.pending_imports.push(peer, confirmation_rx, pending_response);

@@ -20,10 +20,7 @@ use futures::channel::{mpsc, oneshot};

use polkadot_node_network_protocol::request_response::v1::DisputeRequest;
use polkadot_node_primitives::{CandidateVotes, DisputeMessage, SignedDisputeStatement};
use polkadot_node_subsystem::{
messages::{AllMessages, DisputeCoordinatorMessage},
ActiveLeavesUpdate, SubsystemContext,
};
use polkadot_node_subsystem::{messages::DisputeCoordinatorMessage, overseer, ActiveLeavesUpdate};
use polkadot_node_subsystem_util::runtime::RuntimeInfo;
use polkadot_primitives::v2::{CandidateHash, DisputeStatement, Hash, SessionIndex};

@@ -66,6 +63,7 @@ pub struct DisputeSender {
metrics: Metrics,
}

#[overseer::contextbounds(DisputeDistribution, prefix = self::overseer)]
impl DisputeSender {
/// Create a new `DisputeSender` which can be used to start dispute sendings.
pub fn new(tx: mpsc::Sender<TaskFinish>, metrics: Metrics) -> Self {
@@ -79,7 +77,7 @@ impl DisputeSender {
}

/// Create a `SendTask` for a particular new dispute.
pub async fn start_sender<Context: SubsystemContext>(
pub async fn start_sender<Context>(
&mut self,
ctx: &mut Context,
runtime: &mut RuntimeInfo,
@@ -114,7 +112,7 @@ impl DisputeSender {
/// - Get new authorities to send messages to.
/// - Get rid of obsolete tasks and disputes.
/// - Get dispute sending started in case we missed one for some reason (e.g. on node startup)
pub async fn update_leaves<Context: SubsystemContext>(
pub async fn update_leaves<Context>(
&mut self,
ctx: &mut Context,
runtime: &mut RuntimeInfo,
@@ -179,7 +177,7 @@ impl DisputeSender {
/// Call `start_sender` on all passed in disputes.
///
/// Recover necessary votes for building up `DisputeMessage` and start sending for all of them.
async fn start_send_for_dispute<Context: SubsystemContext>(
async fn start_send_for_dispute<Context>(
&mut self,
ctx: &mut Context,
runtime: &mut RuntimeInfo,
@@ -303,7 +301,7 @@ impl DisputeSender {
/// Make active sessions correspond to currently active heads.
///
/// Returns: true if sessions changed.
async fn refresh_sessions<Context: SubsystemContext>(
async fn refresh_sessions<Context>(
&mut self,
ctx: &mut Context,
runtime: &mut RuntimeInfo,
@@ -321,7 +319,8 @@ impl DisputeSender {
/// Retrieve the currently active sessions.
///
/// List is all indices of all active sessions together with the head that was used for the query.
async fn get_active_session_indices<Context: SubsystemContext>(
#[overseer::contextbounds(DisputeDistribution, prefix = self::overseer)]
async fn get_active_session_indices<Context>(
ctx: &mut Context,
runtime: &mut RuntimeInfo,
active_heads: &Vec<Hash>,
@@ -336,27 +335,29 @@ async fn get_active_session_indices<Context: SubsystemContext>(
}

/// Retrieve Set of active disputes from the dispute coordinator.
async fn get_active_disputes<Context: SubsystemContext>(
#[overseer::contextbounds(DisputeDistribution, prefix = self::overseer)]
async fn get_active_disputes<Context>(
ctx: &mut Context,
) -> JfyiErrorResult<Vec<(SessionIndex, CandidateHash)>> {
let (tx, rx) = oneshot::channel();

// Caller scope is in `update_leaves` and this is bounded by fork count.
ctx.send_unbounded_message(AllMessages::DisputeCoordinator(
DisputeCoordinatorMessage::ActiveDisputes(tx),
));
ctx.send_unbounded_message(DisputeCoordinatorMessage::ActiveDisputes(tx));
rx.await.map_err(|_| JfyiError::AskActiveDisputesCanceled)
}

/// Get all locally available dispute votes for a given dispute.
async fn get_candidate_votes<Context: SubsystemContext>(
#[overseer::contextbounds(DisputeDistribution, prefix = self::overseer)]
async fn get_candidate_votes<Context>(
ctx: &mut Context,
session_index: SessionIndex,
candidate_hash: CandidateHash,
) -> JfyiErrorResult<Option<CandidateVotes>> {
let (tx, rx) = oneshot::channel();
// Caller scope is in `update_leaves` and this is bounded by fork count.
ctx.send_unbounded_message(AllMessages::DisputeCoordinator(
DisputeCoordinatorMessage::QueryCandidateVotes(vec![(session_index, candidate_hash)], tx),
ctx.send_unbounded_message(DisputeCoordinatorMessage::QueryCandidateVotes(
vec![(session_index, candidate_hash)],
tx,
));
rx.await
.map(|v| v.get(0).map(|inner| inner.to_owned().2))

@@ -26,10 +26,7 @@ use polkadot_node_network_protocol::{
},
IfDisconnected,
};
use polkadot_node_subsystem::{
messages::{AllMessages, NetworkBridgeMessage},
SubsystemContext,
};
use polkadot_node_subsystem::{messages::NetworkBridgeMessage, overseer};
use polkadot_node_subsystem_util::{metrics, runtime::RuntimeInfo};
use polkadot_primitives::v2::{
AuthorityDiscoveryId, CandidateHash, Hash, SessionIndex, ValidatorIndex,
@@ -100,9 +97,10 @@ impl TaskResult {
}
}

#[overseer::contextbounds(DisputeDistribution, prefix = self::overseer)]
impl SendTask {
/// Initiates sending a dispute message to peers.
pub async fn new<Context: SubsystemContext>(
pub async fn new<Context>(
ctx: &mut Context,
runtime: &mut RuntimeInfo,
active_sessions: &HashMap<SessionIndex, Hash>,
@@ -120,7 +118,7 @@ impl SendTask {
///
/// This function is called at construction and should also be called whenever a session change
/// happens and on a regular basis to ensure we are retrying failed attempts.
pub async fn refresh_sends<Context: SubsystemContext>(
pub async fn refresh_sends<Context>(
&mut self,
ctx: &mut Context,
runtime: &mut RuntimeInfo,
@@ -197,7 +195,8 @@ impl SendTask {
///
/// This is all parachain validators of the session the candidate occurred and all authorities
/// of all currently active sessions, determined by currently active heads.
async fn get_relevant_validators<Context: SubsystemContext>(

async fn get_relevant_validators<Context>(
&self,
ctx: &mut Context,
runtime: &mut RuntimeInfo,
@@ -241,7 +240,8 @@ impl SendTask {
/// Start sending of the given message to all given authorities.
///
/// And spawn tasks for handling the response.
async fn send_requests<Context: SubsystemContext>(
#[overseer::contextbounds(DisputeDistribution, prefix = self::overseer)]
async fn send_requests<Context>(
ctx: &mut Context,
tx: mpsc::Sender<TaskFinish>,
receivers: Vec<AuthorityDiscoveryId>,
@@ -271,7 +271,7 @@ async fn send_requests<Context: SubsystemContext>(
}

let msg = NetworkBridgeMessage::SendRequests(reqs, IfDisconnected::ImmediateError);
ctx.send_message(AllMessages::NetworkBridge(msg)).await;
ctx.send_message(msg).await;
Ok(statuses)
}


@@ -48,8 +48,7 @@ use polkadot_node_subsystem::{
GossipSupportMessage, NetworkBridgeEvent, NetworkBridgeMessage, RuntimeApiMessage,
RuntimeApiRequest,
},
overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext,
SubsystemError,
overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemError,
};
use polkadot_node_subsystem_util as util;
use polkadot_primitives::v2::{
@@ -115,6 +114,7 @@ pub struct GossipSupport<AD> {
metrics: Metrics,
}

#[overseer::contextbounds(GossipSupport, prefix = self::overseer)]
impl<AD> GossipSupport<AD>
where
AD: AuthorityDiscovery,
@@ -138,11 +138,7 @@ where
}
}

async fn run<Context>(mut self, mut ctx: Context) -> Self
where
Context: SubsystemContext<Message = GossipSupportMessage>,
Context: overseer::SubsystemContext<Message = GossipSupportMessage>,
{
async fn run<Context>(mut self, mut ctx: Context) -> Self {
fn get_connectivity_check_delay() -> Delay {
Delay::new(LOW_CONNECTIVITY_WARN_DELAY)
}
@@ -178,7 +174,7 @@ where
gum::trace!(target: LOG_TARGET, "active leaves signal");

let leaves = activated.into_iter().map(|a| a.hash);
if let Err(e) = self.handle_active_leaves(&mut ctx, leaves).await {
if let Err(e) = self.handle_active_leaves(ctx.sender(), leaves).await {
gum::debug!(target: LOG_TARGET, error = ?e);
}
},
@@ -191,18 +187,13 @@ where
/// 1. Determine if the current session index has changed.
/// 2. If it has, determine relevant validators
/// and issue a connection request.
async fn handle_active_leaves<Context>(
async fn handle_active_leaves(
&mut self,
ctx: &mut Context,
sender: &mut impl overseer::GossipSupportSenderTrait,
leaves: impl Iterator<Item = Hash>,
) -> Result<(), util::Error>
where
Context: SubsystemContext<Message = GossipSupportMessage>,
Context: overseer::SubsystemContext<Message = GossipSupportMessage>,
{
) -> Result<(), util::Error> {
for leaf in leaves {
let current_index =
util::request_session_index_for_child(leaf, ctx.sender()).await.await??;
let current_index = util::request_session_index_for_child(leaf, sender).await.await??;
let since_failure = self.last_failure.map(|i| i.elapsed()).unwrap_or_default();
let force_request = since_failure >= BACKOFF_DURATION;
let leaf_session = Some((current_index, leaf));
@@ -216,7 +207,7 @@ where

if let Some((session_index, relay_parent)) = maybe_issue_connection {
let session_info =
util::request_session_info(leaf, session_index, ctx.sender()).await.await??;
util::request_session_info(leaf, session_index, sender).await.await??;

let session_info = match session_info {
Some(s) => s,
@@ -255,13 +246,13 @@ where
// by virtue of a new session being entered. Therefore we maintain
// connections to a much broader set of validators.
{
let mut connections = authorities_past_present_future(ctx, leaf).await?;
let mut connections = authorities_past_present_future(sender, leaf).await?;

// Remove all of our locally controlled validator indices so we don't connect to ourself.
// If we control none of them, don't issue connection requests - we're outside
// of the 'clique' of recent validators.
if remove_all_controlled(&self.keystore, &mut connections).await != 0 {
self.issue_connection_request(ctx, connections).await;
self.issue_connection_request(sender, connections).await;
}
}

@@ -273,7 +264,7 @@ where
self.update_authority_status_metrics(&session_info).await;

update_gossip_topology(
ctx,
sender,
our_index,
session_info.discovery_keys,
relay_parent,
@@ -317,13 +308,12 @@ where
}
}

async fn issue_connection_request<Context>(
async fn issue_connection_request<Sender>(
&mut self,
ctx: &mut Context,
sender: &mut Sender,
authorities: Vec<AuthorityDiscoveryId>,
) where
Context: SubsystemContext<Message = GossipSupportMessage>,
Context: overseer::SubsystemContext<Message = GossipSupportMessage>,
Sender: overseer::GossipSupportSenderTrait,
{
let num = authorities.len();
let mut validator_addrs = Vec::with_capacity(authorities.len());
@@ -347,11 +337,12 @@ where
self.resolved_authorities = resolved;
gum::debug!(target: LOG_TARGET, %num, "Issuing a connection request");

ctx.send_message(NetworkBridgeMessage::ConnectToResolvedValidators {
validator_addrs,
peer_set: PeerSet::Validation,
})
.await;
sender
.send_message(NetworkBridgeMessage::ConnectToResolvedValidators {
validator_addrs,
peer_set: PeerSet::Validation,
})
.await;

// issue another request for the same session
// if at least a third of the authorities were not resolved.
@@ -442,15 +433,11 @@ where
}

// Get the authorities of the past, present, and future.
async fn authorities_past_present_future<Context>(
ctx: &mut Context,
async fn authorities_past_present_future(
sender: &mut impl overseer::GossipSupportSenderTrait,
relay_parent: Hash,
) -> Result<Vec<AuthorityDiscoveryId>, util::Error>
where
Context: SubsystemContext<Message = GossipSupportMessage>,
Context: overseer::SubsystemContext<Message = GossipSupportMessage>,
{
let authorities = util::request_authorities(relay_parent, ctx.sender()).await.await??;
) -> Result<Vec<AuthorityDiscoveryId>, util::Error> {
let authorities = util::request_authorities(relay_parent, sender).await.await??;
gum::debug!(
target: LOG_TARGET,
authority_count = ?authorities.len(),
@@ -500,28 +487,25 @@ async fn remove_all_controlled(
/// This limits the amount of gossip peers to 2 * `sqrt(len)` and ensures the diameter of 2.
///
/// [web3]: https://research.web3.foundation/en/latest/polkadot/networking/3-avail-valid.html#topology
async fn update_gossip_topology<Context>(
ctx: &mut Context,
async fn update_gossip_topology(
sender: &mut impl overseer::GossipSupportSenderTrait,
our_index: usize,
authorities: Vec<AuthorityDiscoveryId>,
relay_parent: Hash,
session_index: SessionIndex,
) -> Result<(), util::Error>
where
Context: SubsystemContext<Message = GossipSupportMessage>,
Context: overseer::SubsystemContext<Message = GossipSupportMessage>,
{
) -> Result<(), util::Error> {
// retrieve BABE randomness
let random_seed = {
let (tx, rx) = oneshot::channel();

// TODO https://github.com/paritytech/polkadot/issues/5316:
// get the random seed from the `SessionInfo` instead.
ctx.send_message(RuntimeApiMessage::Request(
relay_parent,
RuntimeApiRequest::CurrentBabeEpoch(tx),
))
.await;
sender
.send_message(RuntimeApiMessage::Request(
relay_parent,
RuntimeApiRequest::CurrentBabeEpoch(tx),
))
.await;

let randomness = rx.await??.randomness;
let mut subject = [0u8; 40];
@@ -553,12 +537,13 @@ where
.map(|i| (authorities[i].clone(), ValidatorIndex::from(i as u32)))
.collect();

ctx.send_message(NetworkBridgeMessage::NewGossipTopology {
session: session_index,
our_neighbors_x: row_neighbors,
our_neighbors_y: column_neighbors,
})
.await;
sender
.send_message(NetworkBridgeMessage::NewGossipTopology {
session: session_index,
our_neighbors_x: row_neighbors,
our_neighbors_y: column_neighbors,
})
.await;

Ok(())
}
@@ -596,10 +581,9 @@ fn matrix_neighbors(
}
}

impl<Context, AD> overseer::Subsystem<Context, SubsystemError> for GossipSupport<AD>
#[overseer::subsystem(GossipSupport, error = SubsystemError, prefix = self::overseer)]
impl<Context, AD> GossipSupport<AD>
where
Context: SubsystemContext<Message = GossipSupportMessage>,
Context: overseer::SubsystemContext<Message = GossipSupportMessage>,
AD: AuthorityDiscovery + Clone,
{
fn start(self, ctx: Context) -> SpawnedSubsystem {

@@ -38,11 +38,11 @@ use polkadot_node_subsystem_util::{self as util, rand, MIN_GOSSIP_PEERS};
use polkadot_node_subsystem::{
jaeger,
messages::{
AllMessages, CandidateBackingMessage, NetworkBridgeEvent, NetworkBridgeMessage,
CandidateBackingMessage, NetworkBridgeEvent, NetworkBridgeMessage,
StatementDistributionMessage,
},
overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, PerLeafSpan, SpawnedSubsystem,
SubsystemContext, SubsystemError,
SubsystemError,
};
use polkadot_primitives::v2::{
AuthorityDiscoveryId, CandidateHash, CommittedCandidateReceipt, CompactStatement, Hash,
@@ -127,12 +127,8 @@ pub struct StatementDistributionSubsystem<R> {
rng: R,
}

impl<Context, R: rand::Rng + Send + Sync + 'static> overseer::Subsystem<Context, SubsystemError>
for StatementDistributionSubsystem<R>
where
Context: SubsystemContext<Message = StatementDistributionMessage>,
Context: overseer::SubsystemContext<Message = StatementDistributionMessage>,
{
#[overseer::subsystem(StatementDistribution, error=SubsystemError, prefix=self::overseer)]
impl<Context, R: rand::Rng + Send + Sync + 'static> StatementDistributionSubsystem<R> {
fn start(self, ctx: Context) -> SpawnedSubsystem {
// Swallow error because failure is fatal to the node and we log with more precision
// within `run`.
@@ -630,10 +626,10 @@ enum MuxedMessage {
Responder(Option<ResponderMessage>),
}

#[overseer::contextbounds(StatementDistribution, prefix = self::overseer)]
impl MuxedMessage {
async fn receive(
ctx: &mut (impl SubsystemContext<Message = StatementDistributionMessage>
+ overseer::SubsystemContext<Message = StatementDistributionMessage>),
async fn receive<Context>(
ctx: &mut Context,
from_requester: &mut mpsc::Receiver<RequesterMessage>,
from_responder: &mut mpsc::Receiver<ResponderMessage>,
) -> MuxedMessage {
@@ -890,11 +886,12 @@ fn check_statement_signature(
/// circulates the statement to all peers who have not seen it yet, and
/// sends all statements dependent on that statement to peers who could previously not receive
/// them but now can.
async fn circulate_statement_and_dependents(
#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
async fn circulate_statement_and_dependents<Context>(
gossip_peers: &HashSet<PeerId>,
peers: &mut HashMap<PeerId, PeerData>,
active_heads: &mut HashMap<Hash, ActiveHeadData>,
ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
ctx: &mut Context,
relay_parent: Hash,
statement: SignedFullStatement,
priority_peers: Vec<PeerId>,
@@ -1006,10 +1003,11 @@ fn is_statement_large(statement: &SignedFullStatement) -> (bool, Option<usize>)

/// Circulates a statement to all peers who have not seen it yet, and returns
/// an iterator over peers who need to have dependent statements sent.
async fn circulate_statement<'a>(
#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
async fn circulate_statement<'a, Context>(
gossip_peers: &HashSet<PeerId>,
peers: &mut HashMap<PeerId, PeerData>,
ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
ctx: &mut Context,
relay_parent: Hash,
stored: StoredStatement<'a>,
mut priority_peers: Vec<PeerId>,
@@ -1081,10 +1079,10 @@ async fn circulate_statement<'a>(
statement = ?stored.statement,
"Sending statement",
);
ctx.send_message(AllMessages::NetworkBridge(NetworkBridgeMessage::SendValidationMessage(
ctx.send_message(NetworkBridgeMessage::SendValidationMessage(
peers_to_send.iter().map(|(p, _)| p.clone()).collect(),
payload,
)))
))
.await;
}

@@ -1095,10 +1093,11 @@ async fn circulate_statement<'a>(
}

/// Send all statements about a given candidate hash to a peer.
async fn send_statements_about(
#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
async fn send_statements_about<Context>(
peer: PeerId,
peer_data: &mut PeerData,
ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
ctx: &mut Context,
relay_parent: Hash,
candidate_hash: CandidateHash,
active_head: &ActiveHeadData,
@@ -1120,21 +1119,19 @@ async fn send_statements_about(
statement = ?statement.statement,
"Sending statement",
);
ctx.send_message(AllMessages::NetworkBridge(NetworkBridgeMessage::SendValidationMessage(
vec![peer.clone()],
payload,
)))
.await;
ctx.send_message(NetworkBridgeMessage::SendValidationMessage(vec![peer.clone()], payload))
.await;

metrics.on_statement_distributed();
}
}

/// Send all statements at a given relay-parent to a peer.
async fn send_statements(
#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
async fn send_statements<Context>(
peer: PeerId,
peer_data: &mut PeerData,
ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
ctx: &mut Context,
relay_parent: Hash,
active_head: &ActiveHeadData,
metrics: &Metrics,
@@ -1154,23 +1151,19 @@ async fn send_statements(
statement = ?statement.statement,
"Sending statement"
);
ctx.send_message(AllMessages::NetworkBridge(NetworkBridgeMessage::SendValidationMessage(
vec![peer.clone()],
payload,
)))
.await;
ctx.send_message(NetworkBridgeMessage::SendValidationMessage(vec![peer.clone()], payload))
.await;

metrics.on_statement_distributed();
}
}

async fn report_peer(
ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
sender: &mut impl overseer::StatementDistributionSenderTrait,
peer: PeerId,
rep: Rep,
) {
ctx.send_message(AllMessages::NetworkBridge(NetworkBridgeMessage::ReportPeer(peer, rep)))
.await
sender.send_message(NetworkBridgeMessage::ReportPeer(peer, rep)).await
}

/// If message contains a statement, then retrieve it, otherwise fork task to fetch it.
@@ -1180,11 +1173,12 @@ async fn report_peer(
/// your statement.
///
/// If the message was large, but the result has been fetched already that one is returned.
async fn retrieve_statement_from_message<'a>(
#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
async fn retrieve_statement_from_message<'a, Context>(
peer: PeerId,
message: protocol_v1::StatementDistributionMessage,
active_head: &'a mut ActiveHeadData,
ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
ctx: &mut Context,
req_sender: &mpsc::Sender<RequesterMessage>,
metrics: &Metrics,
) -> Option<UncheckedSignedFullStatement> {
@@ -1272,11 +1266,12 @@ async fn retrieve_statement_from_message<'a>(
/// Launch request for a large statement and get tracking status.
///
/// Returns `None` if spawning task failed.
async fn launch_request(
#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
async fn launch_request<Context>(
meta: StatementMetadata,
peer: PeerId,
req_sender: mpsc::Sender<RequesterMessage>,
ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
ctx: &mut Context,
metrics: &Metrics,
) -> Option<LargeStatementStatus> {
let (task, handle) =
@@ -1302,19 +1297,21 @@ async fn launch_request(
}

/// Handle incoming message and circulate it to peers, if we did not know it already.
///
async fn handle_incoming_message_and_circulate<'a>(
#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
async fn handle_incoming_message_and_circulate<'a, Context, R>(
peer: PeerId,
gossip_peers: &HashSet<PeerId>,
peers: &mut HashMap<PeerId, PeerData>,
active_heads: &'a mut HashMap<Hash, ActiveHeadData>,
recent_outdated_heads: &RecentOutdatedHeads,
ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
ctx: &mut Context,
message: protocol_v1::StatementDistributionMessage,
req_sender: &mpsc::Sender<RequesterMessage>,
metrics: &Metrics,
rng: &mut impl rand::Rng,
) {
rng: &mut R,
) where
R: rand::Rng,
{
let handled_incoming = match peers.get_mut(&peer) {
Some(data) =>
handle_incoming_message(
@@ -1360,12 +1357,13 @@ async fn handle_incoming_message_and_circulate<'a>(
//
// This function checks the signature and ensures the statement is compatible with our
// view. It also notifies candidate backing if the statement was previously unknown.
async fn handle_incoming_message<'a>(
#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
async fn handle_incoming_message<'a, Context>(
peer: PeerId,
peer_data: &mut PeerData,
active_heads: &'a mut HashMap<Hash, ActiveHeadData>,
recent_outdated_heads: &RecentOutdatedHeads,
ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
ctx: &mut Context,
message: protocol_v1::StatementDistributionMessage,
req_sender: &mpsc::Sender<RequesterMessage>,
metrics: &Metrics,
@@ -1383,7 +1381,7 @@ async fn handle_incoming_message<'a>(
);

if !recent_outdated_heads.is_recent_outdated(&relay_parent) {
report_peer(ctx, peer, COST_UNEXPECTED_STATEMENT).await;
report_peer(ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await;
}

return None
@@ -1393,7 +1391,7 @@ async fn handle_incoming_message<'a>(
if let protocol_v1::StatementDistributionMessage::LargeStatement(_) = message {
if let Err(rep) = peer_data.receive_large_statement(&relay_parent) {
gum::debug!(target: LOG_TARGET, ?peer, ?message, ?rep, "Unexpected large statement.",);
report_peer(ctx, peer, rep).await;
report_peer(ctx.sender(), peer, rep).await;
return None
}
}
@@ -1434,16 +1432,16 @@ async fn handle_incoming_message<'a>(
// Report peer merely if this is not a duplicate out-of-view statement that
// was caused by a missing Seconded statement from this peer
if unexpected_count == 0_usize {
report_peer(ctx, peer, rep).await;
report_peer(ctx.sender(), peer, rep).await;
}
},
// This happens when we have an unexpected remote peer that announced Seconded
COST_UNEXPECTED_STATEMENT_REMOTE => {
metrics.on_unexpected_statement_seconded();
report_peer(ctx, peer, rep).await;
report_peer(ctx.sender(), peer, rep).await;
},
_ => {
report_peer(ctx, peer, rep).await;
report_peer(ctx.sender(), peer, rep).await;
},
}

@@ -1464,7 +1462,7 @@ async fn handle_incoming_message<'a>(
peer_data
.receive(&relay_parent, &fingerprint, max_message_count)
.expect("checked in `check_can_receive` above; qed");
report_peer(ctx, peer, BENEFIT_VALID_STATEMENT).await;
report_peer(ctx.sender(), peer, BENEFIT_VALID_STATEMENT).await;

return None
},
@@ -1474,7 +1472,7 @@ async fn handle_incoming_message<'a>(
match check_statement_signature(&active_head, relay_parent, unchecked_compact) {
Err(statement) => {
gum::debug!(target: LOG_TARGET, ?peer, ?statement, "Invalid statement signature");
report_peer(ctx, peer, COST_INVALID_SIGNATURE).await;
report_peer(ctx.sender(), peer, COST_INVALID_SIGNATURE).await;
return None
},
Ok(statement) => statement,
@@ -1500,7 +1498,7 @@ async fn handle_incoming_message<'a>(
is_large_statement,
"Full statement had bad payload."
);
report_peer(ctx, peer, COST_WRONG_HASH).await;
report_peer(ctx.sender(), peer, COST_WRONG_HASH).await;
return None
},
Ok(statement) => statement,
@@ -1539,7 +1537,7 @@ async fn handle_incoming_message<'a>(
unreachable!("checked in `is_useful_or_unknown` above; qed");
},
NotedStatement::Fresh(statement) => {
report_peer(ctx, peer, BENEFIT_VALID_STATEMENT_FIRST).await;
report_peer(ctx.sender(), peer, BENEFIT_VALID_STATEMENT_FIRST).await;

let mut _span = handle_incoming_span.child("notify-backing");

@@ -1557,16 +1555,19 @@ async fn handle_incoming_message<'a>(
}

/// Update a peer's view. Sends all newly unlocked statements based on the previous
async fn update_peer_view_and_maybe_send_unlocked(
#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
async fn update_peer_view_and_maybe_send_unlocked<Context, R>(
peer: PeerId,
gossip_peers: &HashSet<PeerId>,
peer_data: &mut PeerData,
ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
ctx: &mut Context,
active_heads: &HashMap<Hash, ActiveHeadData>,
new_view: View,
metrics: &Metrics,
rng: &mut impl rand::Rng,
) {
rng: &mut R,
) where
R: rand::Rng,
{
let old_view = std::mem::replace(&mut peer_data.view, new_view);

// Remove entries for all relay-parents in the old view but not the new.
@@ -1596,18 +1597,21 @@ async fn update_peer_view_and_maybe_send_unlocked(
}
}

async fn handle_network_update(
#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
async fn handle_network_update<Context, R>(
peers: &mut HashMap<PeerId, PeerData>,
gossip_peers: &mut HashSet<PeerId>,
authorities: &mut HashMap<AuthorityDiscoveryId, PeerId>,
active_heads: &mut HashMap<Hash, ActiveHeadData>,
recent_outdated_heads: &RecentOutdatedHeads,
ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
ctx: &mut Context,
req_sender: &mpsc::Sender<RequesterMessage>,
update: NetworkBridgeEvent<net_protocol::StatementDistributionMessage>,
metrics: &Metrics,
rng: &mut impl rand::Rng,
) {
rng: &mut R,
) where
R: rand::Rng,
{
match update {
NetworkBridgeEvent::PeerConnected(peer, role, _, maybe_authority) => {
gum::trace!(target: LOG_TARGET, ?peer, ?role, "Peer connected");
@@ -1701,6 +1705,7 @@ async fn handle_network_update(
}
}

#[overseer::contextbounds(StatementDistribution, prefix = self::overseer)]
impl<R: rand::Rng> StatementDistributionSubsystem<R> {
/// Create a new Statement Distribution Subsystem
pub fn new(
@@ -1712,11 +1717,7 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> {
Self { keystore, req_receiver: Some(req_receiver), metrics, rng }
}

async fn run(
mut self,
mut ctx: (impl SubsystemContext<Message = StatementDistributionMessage>
+ overseer::SubsystemContext<Message = StatementDistributionMessage>),
) -> std::result::Result<(), FatalError> {
async fn run<Context>(mut self, mut ctx: Context) -> std::result::Result<(), FatalError> {
let mut peers: HashMap<PeerId, PeerData> = HashMap::new();
let mut gossip_peers: HashSet<PeerId> = HashSet::new();
let mut authorities: HashMap<AuthorityDiscoveryId, PeerId> = HashMap::new();
@@ -1832,9 +1833,9 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> {
Ok(())
}

async fn handle_requester_message(
async fn handle_requester_message<Context>(
&mut self,
ctx: &mut impl SubsystemContext,
ctx: &mut Context,
gossip_peers: &HashSet<PeerId>,
peers: &mut HashMap<PeerId, PeerData>,
active_heads: &mut HashMap<Hash, ActiveHeadData>,
@@ -1851,9 +1852,9 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> {
bad_peers,
} => {
for bad in bad_peers {
report_peer(ctx, bad, COST_FETCH_FAIL).await;
report_peer(ctx.sender(), bad, COST_FETCH_FAIL).await;
}
report_peer(ctx, from_peer, BENEFIT_VALID_RESPONSE).await;
report_peer(ctx.sender(), from_peer, BENEFIT_VALID_RESPONSE).await;

let active_head = active_heads
.get_mut(&relay_parent)
@@ -1898,10 +1899,10 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> {
}
},
RequesterMessage::SendRequest(req) => {
ctx.send_message(AllMessages::NetworkBridge(NetworkBridgeMessage::SendRequests(
ctx.send_message(NetworkBridgeMessage::SendRequests(
vec![req],
IfDisconnected::ImmediateError,
)))
))
.await;
},
RequesterMessage::GetMorePeers { relay_parent, candidate_hash, tx } => {
@@ -1935,14 +1936,14 @@ impl<R: rand::Rng> StatementDistributionSubsystem<R> {
}
}
},
RequesterMessage::ReportPeer(peer, rep) => report_peer(ctx, peer, rep).await,
RequesterMessage::ReportPeer(peer, rep) => report_peer(ctx.sender(), peer, rep).await,
}
Ok(())
}

async fn handle_subsystem_message(
async fn handle_subsystem_message<Context>(
&mut self,
ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
ctx: &mut Context,
runtime: &mut RuntimeInfo,
peers: &mut HashMap<PeerId, PeerData>,
gossip_peers: &mut HashSet<PeerId>,

@@ -29,7 +29,7 @@ use polkadot_node_network_protocol::{
use polkadot_node_primitives::{Statement, UncheckedSignedFullStatement};
use polkadot_node_subsystem::{
jaeger,
messages::{network_bridge_event, RuntimeApiMessage, RuntimeApiRequest},
messages::{network_bridge_event, AllMessages, RuntimeApiMessage, RuntimeApiRequest},
ActivatedLeaf, LeafStatus,
};
use polkadot_node_subsystem_test_helpers::mock::make_ferdie_keystore;

@@ -24,14 +24,12 @@ use std::time::Duration;

use ::test_helpers::{dummy_candidate_descriptor, dummy_hash};
use polkadot_node_primitives::{BlockData, PoV};
use polkadot_node_subsystem_types::messages::{
CandidateBackingMessage, CandidateValidationMessage,
};
use polkadot_node_subsystem_types::messages::CandidateValidationMessage;
use polkadot_overseer::{
self as overseer,
dummy::dummy_overseer_builder,
gen::{FromOverseer, SpawnedSubsystem},
AllMessages, HeadSupportsParachains, OverseerSignal, SubsystemError,
HeadSupportsParachains, SubsystemError,
};
use polkadot_primitives::v2::{CandidateReceipt, Hash};

@@ -46,15 +44,9 @@ impl HeadSupportsParachains for AlwaysSupportsParachains {

struct Subsystem1;

#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
impl Subsystem1 {
async fn run<Ctx>(mut ctx: Ctx) -> ()
where
Ctx: overseer::SubsystemContext<
Message = CandidateBackingMessage,
AllMessages = AllMessages,
Signal = OverseerSignal,
>,
{
async fn run<Context>(mut ctx: Context) {
'louy: loop {
match ctx.try_recv().await {
Ok(Some(msg)) => {
@@ -84,21 +76,14 @@ impl Subsystem1 {
Default::default(),
tx,
);
ctx.send_message(<Ctx as overseer::SubsystemContext>::AllMessages::from(msg))
.await;
ctx.send_message(msg).await;
}
()
}
}

impl<Context> overseer::Subsystem<Context, SubsystemError> for Subsystem1
where
Context: overseer::SubsystemContext<
Message = CandidateBackingMessage,
AllMessages = AllMessages,
Signal = OverseerSignal,
>,
{
#[overseer::subsystem(CandidateBacking, error = SubsystemError, prefix = self::overseer)]
impl<Context> Subsystem1 {
fn start(self, ctx: Context) -> SpawnedSubsystem<SubsystemError> {
let future = Box::pin(async move {
Self::run(ctx).await;
@@ -113,15 +98,9 @@ where

struct Subsystem2;

#[overseer::contextbounds(CandidateValidation, prefix = self::overseer)]
impl Subsystem2 {
async fn run<Ctx>(mut ctx: Ctx)
where
Ctx: overseer::SubsystemContext<
Message = CandidateValidationMessage,
AllMessages = AllMessages,
Signal = OverseerSignal,
>,
{
async fn run<Context>(mut ctx: Context) -> () {
ctx.spawn(
"subsystem-2-job",
Box::pin(async {
@@ -151,14 +130,8 @@ impl Subsystem2 {
}
}

impl<Context> overseer::Subsystem<Context, SubsystemError> for Subsystem2
where
Context: overseer::SubsystemContext<
Message = CandidateValidationMessage,
AllMessages = AllMessages,
Signal = OverseerSignal,
>,
{
#[overseer::subsystem(CandidateValidation, error = SubsystemError, prefix = self::overseer)]
impl<Context> Subsystem2 {
fn start(self, ctx: Context) -> SpawnedSubsystem<SubsystemError> {
let future = Box::pin(async move {
Self::run(ctx).await;
@@ -181,6 +154,7 @@ fn main() {
.unwrap()
.replace_candidate_validation(|_| Subsystem2)
.replace_candidate_backing(|orig| orig)
.replace_candidate_backing(|_orig| Subsystem1)
.build()
.unwrap();


@@ -4,6 +4,7 @@ version = "0.9.19"
authors = ["Parity Technologies <admin@parity.io>"]
edition = "2021"
description = "Generate an overseer including builder pattern and message wrapper from a single struct."
autoexamples = false

[dependencies]
gum = { package = "tracing-gum", path = "../../gum" }
@@ -22,6 +23,16 @@ pin-project = "1.0"
trybuild = "1.0.61"
rustversion = "1.0.6"


[[example]]
name = "duo"
crate-type = ["bin"]

[[example]]
name = "solo"
crate-type = ["bin"]

[features]
default = []
expand = ["polkadot-overseer-gen-proc-macro/expand"]

@@ -11,17 +11,20 @@ declarative.
```rust
#[overlord(signal=SigSigSig, event=Event, gen=AllMessages, error=OverseerError)]
pub struct Overseer {
#[subsystem(MsgA)]
#[subsystem(MsgA, sends: [MsgB])]
sub_a: AwesomeSubSysA,

#[subsystem(MsgB)]
#[subsystem(MsgB, sends: [MsgA])]
sub_b: AwesomeSubSysB,
}
```

* Each subsystem is annotated with `#[subsystem(_)]` where `MsgA` respectively `MsgB` are the messages
being consumed by that particular subsystem. Each of those subsystems is required to implement the subsystem
trait.
trait with the correct trait bounds. Commonly this is achieved
by using the `#[subsystem]` and `#[contextbounds]` macros.
* `#[contextbounds(Foo, error=Yikes, prefix=wherethetraitsat)]` can be applied to `impl`-blocks and `fn`-blocks. It will add additional trait bounds for the generic `Context`, namely `Context: FooContextTrait` and `<Context as FooContextTrait>::Sender: FooSenderTrait`, besides a few more. Note that `Foo` here references the name of the subsystem as declared in the `#[overlord(..)]` macro.
* `#[subsystem(Foo, error=Yikes, prefix=wherethetraitsat)]` is an extension to the above, implementing `trait Subsystem<Context, Yikes>`.
* `error=` tells the overseer to use the user-provided
error type; if not provided, a builtin one is used. Note that this is the one error type used throughout all calls, so make sure it implements `From<E>` for all other error types `E` that are relevant to your application.
* `event=` declares an external event type, that injects certain events
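
For illustration, this is roughly how `#[contextbounds]` is applied in practice (a minimal sketch mirroring the usages elsewhere in this PR; the function body is elided):

```rust
// Expands to bounds like `Context: CollatorProtocolContextTrait` and
// `<Context as CollatorProtocolContextTrait>::Sender: CollatorProtocolSenderTrait`
// on every generic `Context` parameter of the annotated item.
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
async fn process_msg<Context>(ctx: &mut Context /* ... */) {
    // `ctx.sender()` here only accepts messages the subsystem declared via `sends:`.
}
```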
@@ -63,10 +66,10 @@ is not ready to be included in the Overseer:
```rust
#[overlord(signal=SigSigSig, event=Event, gen=AllMessages, error=OverseerError)]
pub struct Overseer {
#[subsystem(MsgA)]
#[subsystem(MsgA, sends: MsgB)]
sub_a: AwesomeSubSysA,

#[subsystem(MsgB), wip]
#[subsystem(MsgB, sends: MsgA), wip]
sub_b: AwesomeSubSysB, // This subsystem will not be required nor allowed to be set
}
```

@@ -0,0 +1,21 @@
# Limit outgoing messages

## Status

Accepted + implemented.

## Context

Previously, there was no way to limit which messages a subsystem may send, and hence no way to reason about a subset of subsystems or whether they form a cycle. Limiting the outgoing message types is a first step towards creating the respective graphs and applying classic graph algorithms to detect cycles, leaving their resolution to the user.
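
Cycle detection on such a graph is a standard problem; a minimal sketch using `petgraph` (which this PR adds as a dependency), with hypothetical subsystem names:

```rust
use petgraph::{algo::is_cyclic_directed, graph::DiGraph};

fn main() {
    // Nodes are subsystems, edges are the declared `sends:` relations.
    let mut graph = DiGraph::<&str, ()>::new();
    let a = graph.add_node("SubsystemA");
    let b = graph.add_node("SubsystemB");
    graph.add_edge(a, b, ()); // A sends to B
    graph.add_edge(b, a, ()); // B sends back to A, closing a cycle
    assert!(is_cyclic_directed(&graph));
}
```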

## Decision

Annotate the `#[overlord]` inner `#[subsystem(..)]` annotation
with an additional set of outgoing messages and enforce this via finer-grained trait bounds on the `Sender` and `<Context>::Sender` bounds.
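
In practice the declaration looks like this (abridged from the `duo` example added in this PR):

```rust
#[overlord(signal=SigSigSig, event=EvX, error=Yikes, gen=AllMessages)]
struct Duo<T> {
    #[subsystem(consumes: MsgStrukt, sends: [Plinko])]
    sub0: Awesome,

    #[subsystem(blocking, consumes: Plinko, sends: [MsgStrukt])]
    plinkos: GoblinTower,
}
```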

## Consequences

* A graph will be generated for every compilation under the `OUT_DIR` of the crate where `#[overlord]` is specified.
* Each subsystem has a consuming message, often referred to as the generic `M` (unchanged from before), but there is now a trait `AssociateOutgoing { type OutgoingMessages = ..; }` which defines an outgoing helper `enum`, generated with an ident constructed as `${Subsystem}OutgoingMessages`, where `${Subsystem}` is the subsystem identifier as used in the overseer declaration. `${Subsystem}OutgoingMessages` is used throughout to constrain the outgoing messages (commonly via an `OutgoingMessage` generic bounded by `${Subsystem}OutgoingMessages: From<OutgoingMessage>` or `::OutgoingMessages: From`). It is what allows the construction of the graph and compile-time verification; a sketch of the generated shape follows this list.
* `${Subsystem}SenderTrait` and `${Subsystem}ContextTrait` are accumulation traits, or wrapper traits, that combine over all annotated `M` or `OutgoingMessages` from the overseer declaration or their respective outgoing types. They are a usage convenience and assure consistency within a subsystem while also maintaining a single source of truth for which messages can be sent by a particular subsystem. Note that this is sidestepped for the test subsystem, which may consume `gen=AllMessages`, the global message wrapper type.
* `Job`-based subsystems, being on their way out, are patched, but they are now generic over the `Sender` type, leaking that type.
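
To make the mechanism concrete, here is a rough, hand-written sketch of the shape the macro generates (illustrative only, not the actual generated code; names follow the `${Subsystem}` scheme above for a hypothetical subsystem `Foo` declared with `sends: [MsgA, MsgB]`):

```rust
pub struct MsgA;
pub struct MsgB;
pub struct FooSubsystem;

// Every subsystem gets an associated wrapper over its declared outgoing messages.
pub trait AssociateOutgoing {
    type OutgoingMessages;
}

// Generated wrapper enum: the only messages `Foo` may send.
pub enum FooOutgoingMessages {
    MsgA(MsgA),
    MsgB(MsgB),
}

impl From<MsgA> for FooOutgoingMessages {
    fn from(m: MsgA) -> Self {
        Self::MsgA(m)
    }
}

impl From<MsgB> for FooOutgoingMessages {
    fn from(m: MsgB) -> Self {
        Self::MsgB(m)
    }
}

impl AssociateOutgoing for FooSubsystem {
    type OutgoingMessages = FooOutgoingMessages;
}

// A sender bound in the spirit of the generated one: sending only
// compiles for message types convertible into `FooOutgoingMessages`.
fn send<OutgoingMessage>(msg: OutgoingMessage)
where
    FooOutgoingMessages: From<OutgoingMessage>,
{
    let _wrapped = FooOutgoingMessages::from(msg);
    // ... route the wrapped message to the overseer ...
}

fn main() {
    send(MsgA); // compiles
    send(MsgB); // compiles
    // send(42u8); // would fail: `FooOutgoingMessages: From<u8>` is not satisfied
}
```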
|
||||
@@ -1,143 +0,0 @@
|
||||
//! A dummy to be used with cargo expand
|
||||
|
||||
use polkadot_node_network_protocol::WrongVariant;
|
||||
use polkadot_overseer_gen::*;
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Concrete subsystem implementation for `MsgStrukt` msg type.
|
||||
#[derive(Default)]
|
||||
pub struct AwesomeSubSys;
|
||||
|
||||
impl ::polkadot_overseer_gen::Subsystem<XxxSubsystemContext<MsgStrukt>, Yikes> for AwesomeSubSys {
|
||||
fn start(self, _ctx: XxxSubsystemContext<MsgStrukt>) -> SpawnedSubsystem<Yikes> {
|
||||
unimplemented!("starting yay!")
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct GoblinTower;
|
||||
|
||||
impl ::polkadot_overseer_gen::Subsystem<XxxSubsystemContext<Plinko>, Yikes> for GoblinTower {
|
||||
fn start(self, _ctx: XxxSubsystemContext<Plinko>) -> SpawnedSubsystem<Yikes> {
|
||||
unimplemented!("welcum")
|
||||
}
|
||||
}
|
||||
|
||||
/// A signal sent by the overseer.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct SigSigSig;
|
||||
|
||||
/// The external event.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct EvX;
|
||||
|
||||
impl EvX {
|
||||
pub fn focus<'a, T>(&'a self) -> Result<EvX, ()> {
|
||||
unimplemented!("dispatch")
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct Yikes;
|
||||
|
||||
impl std::fmt::Display for Yikes {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
writeln!(f, "yikes!")
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for Yikes {}
|
||||
|
||||
impl From<polkadot_overseer_gen::OverseerError> for Yikes {
|
||||
fn from(_: polkadot_overseer_gen::OverseerError) -> Yikes {
|
||||
Yikes
|
||||
}
|
||||
}
|
||||
|
||||
impl From<polkadot_overseer_gen::mpsc::SendError> for Yikes {
|
||||
fn from(_: polkadot_overseer_gen::mpsc::SendError) -> Yikes {
|
||||
Yikes
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct MsgStrukt(u8);
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct Plinko;
|
||||
|
||||
impl From<NetworkMsg> for MsgStrukt {
|
||||
fn from(_event: NetworkMsg) -> Self {
|
||||
MsgStrukt(1u8)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub enum NetworkMsg {
|
||||
A,
|
||||
B,
|
||||
C,
|
||||
}
|
||||
|
||||
impl NetworkMsg {
|
||||
fn focus(&self) -> Result<Self, WrongVariant> {
|
||||
Ok(match self {
|
||||
Self::B => return Err(WrongVariant),
|
||||
Self::A | Self::C => self.clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[overlord(signal=SigSigSig, event=EvX, error=Yikes, network=NetworkMsg, gen=AllMessages)]
|
||||
struct Xxx<T> {
|
||||
#[subsystem(MsgStrukt)]
|
||||
sub0: AwesomeSubSys,
|
||||
|
||||
#[subsystem(no_dispatch, blocking, Plinko)]
|
||||
plinkos: GoblinTower,
|
||||
|
||||
i_like_pi: f64,
|
||||
i_like_generic: T,
|
||||
i_like_hash: HashMap<f64, f64>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
struct DummySpawner;
|
||||
|
||||
impl SpawnNamed for DummySpawner {
|
||||
fn spawn_blocking(
|
||||
&self,
|
||||
task_name: &'static str,
|
||||
subsystem_name: Option<&'static str>,
|
||||
_future: futures::future::BoxFuture<'static, ()>,
|
||||
) {
|
||||
unimplemented!("spawn blocking {} {}", task_name, subsystem_name.unwrap_or("default"))
|
||||
}
|
||||
|
||||
fn spawn(
|
||||
&self,
|
||||
task_name: &'static str,
|
||||
subsystem_name: Option<&'static str>,
|
||||
_future: futures::future::BoxFuture<'static, ()>,
|
||||
) {
|
||||
unimplemented!("spawn {} {}", task_name, subsystem_name.unwrap_or("default"))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
struct DummyCtx;
|
||||
|
||||
fn main() {
|
||||
let (overseer, _handle): (Xxx<_, f64>, _) = Xxx::builder()
|
||||
.sub0(AwesomeSubSys::default())
|
||||
.plinkos(GoblinTower::default())
|
||||
.i_like_pi(::std::f64::consts::PI)
|
||||
.i_like_generic(42.0)
|
||||
.i_like_hash(HashMap::new())
|
||||
.spawner(DummySpawner)
|
||||
.build()
|
||||
.unwrap();
|
||||
assert_eq!(overseer.i_like_pi.floor() as i8, 3);
|
||||
assert_eq!(overseer.i_like_generic.floor() as i8, 42);
|
||||
assert_eq!(overseer.i_like_hash.len() as i8, 0);
|
||||
}
|
||||
@@ -0,0 +1,89 @@
#![allow(dead_code)] // overseer events are not used

//! A dummy to be used with cargo expand

use polkadot_overseer_gen::{self as overseer, SpawnNamed, *};
use std::collections::HashMap;
mod misc;

pub use self::misc::*;

/// Concrete subsystem implementation for `MsgStrukt` msg type.
#[derive(Default)]
pub struct AwesomeSubSys;

#[overseer::subsystem(Awesome, error=Yikes)]
impl<Context> AwesomeSubSys {
    fn start(self, mut ctx: Context) -> SpawnedSubsystem<Yikes> {
        let mut sender = ctx.sender().clone();
        ctx.spawn(
            "AwesomeSubsys",
            Box::pin(async move {
                sender.send_message(Plinko).await;
            }),
        )
        .unwrap();
        unimplemented!("starting yay!")
    }
}

#[derive(Default)]
pub struct Fortified;

#[overseer::subsystem(GoblinTower, error=Yikes)]
impl<Context> Fortified {
    fn start(self, mut ctx: Context) -> SpawnedSubsystem<Yikes> {
        let mut sender = ctx.sender().clone();
        ctx.spawn(
            "GoblinTower",
            Box::pin(async move {
                sender.send_message(MsgStrukt(8u8)).await;
            }),
        )
        .unwrap();
        unimplemented!("welcome")
    }
}

#[overlord(signal=SigSigSig, event=EvX, error=Yikes, gen=AllMessages)]
struct Duo<T> {
    #[subsystem(consumes: MsgStrukt, sends: [Plinko])]
    sub0: Awesome,

    #[subsystem(blocking, consumes: Plinko, sends: [MsgStrukt])]
    plinkos: GoblinTower,

    i_like_pi: f64,
    i_like_generic: T,
    i_like_hash: HashMap<f64, f64>,
}

fn main() {
    use futures::{executor, pin_mut};

    executor::block_on(async move {
        let (overseer, _handle): (Duo<_, f64>, _) = Duo::builder()
            .sub0(AwesomeSubSys::default())
            .plinkos(Fortified::default())
            .i_like_pi(::std::f64::consts::PI)
            .i_like_generic(42.0)
            .i_like_hash(HashMap::new())
            .spawner(DummySpawner)
            .build()
            .unwrap();

        assert_eq!(overseer.i_like_pi.floor() as i8, 3);
        assert_eq!(overseer.i_like_generic.floor() as i8, 42);
        assert_eq!(overseer.i_like_hash.len() as i8, 0);

        let overseer_fut = overseer
            .running_subsystems
            .into_future()
            .timeout(std::time::Duration::from_millis(300))
            .fuse();

        pin_mut!(overseer_fut);

        overseer_fut.await
    });
}
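For orientation, here is a rough, hand-written approximation of what `#[overlord(..)]` derives for `Duo` above: the `AllMessages` wrapper enum, a per-subsystem outgoing wrapper for everything listed under `sends: [..]`, and the `From` conversions routing an outgoing message to the subsystem that consumes it. This is a sketch only; names and shapes mirror the generator code further down, and the real expansion contains considerably more.

// Illustrative sketch, not the actual macro expansion.
#[derive(Debug)]
pub enum AllMessages {
    Awesome(MsgStrukt),
    GoblinTower(Plinko),
    // dummy message type, useful for tests
    Empty,
}

/// Outgoing messages the `Awesome` subsystem declared via `sends: [Plinko]`.
#[derive(Debug)]
pub enum AwesomeOutgoingMessages {
    Plinko(Plinko),
    Empty,
}

impl From<Plinko> for AwesomeOutgoingMessages {
    fn from(message: Plinko) -> Self {
        AwesomeOutgoingMessages::Plinko(message)
    }
}

impl From<AwesomeOutgoingMessages> for AllMessages {
    fn from(message: AwesomeOutgoingMessages) -> Self {
        match message {
            // `Plinko` is consumed by `GoblinTower`, hence that variant.
            AwesomeOutgoingMessages::Plinko(msg) => AllMessages::GoblinTower(msg),
            AwesomeOutgoingMessages::Empty => AllMessages::Empty,
        }
    }
}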
@@ -0,0 +1,69 @@
use polkadot_overseer_gen::{SpawnNamed, *};

#[derive(Debug, Clone, Copy)]
pub enum SigSigSig {
    Conclude,
    Foo,
}

#[derive(Debug, Clone)]
pub struct DummySpawner;

impl SpawnNamed for DummySpawner {
    fn spawn_blocking(
        &self,
        task_name: &'static str,
        subsystem_name: Option<&'static str>,
        _future: futures::future::BoxFuture<'static, ()>,
    ) {
        unimplemented!("spawn blocking {} {}", task_name, subsystem_name.unwrap_or("default"))
    }

    fn spawn(
        &self,
        task_name: &'static str,
        subsystem_name: Option<&'static str>,
        _future: futures::future::BoxFuture<'static, ()>,
    ) {
        unimplemented!("spawn {} {}", task_name, subsystem_name.unwrap_or("default"))
    }
}

/// The external event.
#[derive(Debug, Clone)]
pub struct EvX;

impl EvX {
    pub fn focus<'a, T>(&'a self) -> Result<EvX, ()> {
        unimplemented!("focus")
    }
}

#[derive(Debug, Clone, Copy)]
pub struct Yikes;

impl std::fmt::Display for Yikes {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        writeln!(f, "yikes!")
    }
}

impl std::error::Error for Yikes {}

impl From<polkadot_overseer_gen::OverseerError> for Yikes {
    fn from(_: polkadot_overseer_gen::OverseerError) -> Yikes {
        Yikes
    }
}

impl From<polkadot_overseer_gen::mpsc::SendError> for Yikes {
    fn from(_: polkadot_overseer_gen::mpsc::SendError) -> Yikes {
        Yikes
    }
}

#[derive(Debug, Clone)]
pub struct MsgStrukt(pub u8);

#[derive(Debug, Clone, Copy)]
pub struct Plinko;
@@ -0,0 +1,54 @@
#![allow(dead_code)] // overseer events are not used

//! A minimal demo to be used with cargo expand.

use polkadot_overseer_gen::{self as overseer, SpawnNamed, *};
mod misc;

pub use self::misc::*;

#[overlord(signal=SigSigSig, event=EvX, error=Yikes, gen=AllMessages)]
struct Solo<T> {
    #[subsystem(consumes: Plinko, sends: [MsgStrukt])]
    goblin_tower: GoblinTower,
}

#[derive(Default)]
pub struct Fortified;

#[overseer::subsystem(GoblinTower, error=Yikes)]
impl<Context> Fortified {
    fn start(self, mut ctx: Context) -> SpawnedSubsystem<Yikes> {
        let mut sender = ctx.sender().clone();
        ctx.spawn(
            "GoblinTower",
            Box::pin(async move {
                sender.send_message(MsgStrukt(8u8)).await;
            }),
        )
        .unwrap();
        unimplemented!("welcome")
    }
}

fn main() {
    use futures::{executor, pin_mut};

    executor::block_on(async move {
        let (overseer, _handle): (Solo<_>, _) = Solo::builder()
            .goblin_tower(Fortified::default())
            .spawner(DummySpawner)
            .build()
            .unwrap();

        let overseer_fut = overseer
            .running_subsystems
            .into_future()
            .timeout(std::time::Duration::from_millis(300))
            .fuse();

        pin_mut!(overseer_fut);

        overseer_fut.await
    });
}
@@ -17,12 +17,19 @@ quote = "1.0.18"
proc-macro2 = "1.0.37"
proc-macro-crate = "1.1.3"
expander = { version = "0.0.6", default-features = false }
petgraph = "0.6.0"

[dev-dependencies]
assert_matches = "1.5.0"
polkadot-overseer-gen = { path = "../" }
thiserror = "1"
gum = { package = "tracing-gum", path = "../../../gum" }

[features]
default = []
default = ["graph", "expand"]
# write the expanded version to a `overlord-expansion.[a-f0-9]{10}.rs`
# in the `OUT_DIR` as defined by `cargo` for the `expander` crate.
expand = []
# Create directional message consuming / outgoing graph.
# Generates: `${OUT_DIR}/${overseer|lowercase}-subsystem-messaging.dot`
graph = []

@@ -0,0 +1,3 @@
fn main() {
    // populate OUT_DIR
}
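The build script is intentionally empty: its mere presence makes cargo define `OUT_DIR` for this crate, which both the `expander` crate and the `graph` feature use as their output directory. A hedged sketch of how an emitted artifact could then be located, assuming an overseer named `Overseer` (the file name mirrors the generator code further down):

// Sketch only: where the `graph` feature would place its .dot file.
fn dot_file() -> std::path::PathBuf {
    // `OUT_DIR` is only defined because of the (otherwise empty) build.rs above.
    std::path::PathBuf::from(env!("OUT_DIR")).join("overseer-subsystem-messaging.dot")
}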
@@ -103,7 +103,7 @@ pub(crate) fn impl_builder(info: &OverseerInfo) -> proc_macro2::TokenStream {
    info.subsystems().iter().filter(|ssf| !ssf.wip).enumerate().map(|(idx, ssf)| {
        let field_name = &ssf.name;
        let field_type = &ssf.generic;
        let subsystem_consumes = &ssf.consumes;
        let subsystem_consumes = &ssf.message_to_consume;
        // Remove state generic for the item to be replaced. It is sufficient to know `field_type` for
        // that since we always move from `Init<#field_type>` to `Init<NEW>`.
        let impl_subsystem_state_generics = recollect_without_idx(&subsystem_passthrough_state_generics[..], idx);
@@ -130,19 +130,28 @@ pub(crate) fn impl_builder(info: &OverseerInfo) -> proc_macro2::TokenStream {
        // see the loop below.
        let to_keep_subsystem_name = recollect_without_idx(&subsystem_name[..], idx);

        let subsystem_sender_trait = format_ident!("{}SenderTrait", field_type);
        let _subsystem_ctx_trait = format_ident!("{}ContextTrait", field_type);

        let builder_where_clause = quote!{
            #field_type : #support_crate::Subsystem< #subsystem_ctx_name< #subsystem_consumes >, #error_ty>,
            < #subsystem_ctx_name < #subsystem_consumes > as #support_crate :: SubsystemContext>::Sender:
                #subsystem_sender_trait,
        };

        // Create the field init `fn`
        quote! {
            impl <InitStateSpawner, #field_type, #( #impl_subsystem_state_generics, )* #( #baggage_passthrough_state_generics, )*>
            #builder <InitStateSpawner, #( #current_state_generics, )* #( #baggage_passthrough_state_generics, )*>
            where
                #field_type : Subsystem<#subsystem_ctx_name<#subsystem_consumes>, #error_ty>,
                #builder_where_clause
            {
                /// Specify the subsystem in the builder directly
                pub fn #field_name (self, var: #field_type ) ->
                    #builder <InitStateSpawner, #( #post_setter_state_generics, )* #( #baggage_passthrough_state_generics, )*>
                {
                    #builder {
                        #field_name: Init::<#field_type>::Value(var),
                        #field_name: Init::< #field_type >::Value(var),
                        #(
                            #to_keep_subsystem_name: self. #to_keep_subsystem_name,
                        )*
@@ -158,7 +167,7 @@ pub(crate) fn impl_builder(info: &OverseerInfo) -> proc_macro2::TokenStream {
                /// Specify the initialization function for a subsystem
                pub fn #field_name_with<'a, F>(self, subsystem_init_fn: F ) ->
                    #builder <InitStateSpawner, #( #post_setter_state_generics, )* #( #baggage_passthrough_state_generics, )*>
                where
                where
                    F: 'static + FnOnce(#handle) ->
                        ::std::result::Result<#field_type, #error_ty>,
                {
@@ -185,7 +194,7 @@ pub(crate) fn impl_builder(info: &OverseerInfo) -> proc_macro2::TokenStream {
            impl <InitStateSpawner, #field_type, #( #impl_subsystem_state_generics, )* #( #baggage_passthrough_state_generics, )*>
            #builder <InitStateSpawner, #( #post_setter_state_generics, )* #( #baggage_passthrough_state_generics, )*>
            where
                #field_type : Subsystem<#subsystem_ctx_name<#subsystem_consumes>, #error_ty>,
                #builder_where_clause
            {
                /// Replace a subsystem by another implementation for the
                /// consumable message type.
@@ -301,6 +310,28 @@ pub(crate) fn impl_builder(info: &OverseerInfo) -> proc_macro2::TokenStream {
        S, #( #baggage_generic_ty, )* #( #subsystem_generics, )*
    };

    let builder_where_clause = info
        .subsystems()
        .iter()
        .map(|ssf| {
            let field_type = &ssf.generic;
            let consumes = &ssf.message_to_consume;
            let subsystem_sender_trait = format_ident!("{}SenderTrait", ssf.generic);
            let subsystem_ctx_trait = format_ident!("{}ContextTrait", ssf.generic);
            quote! {
                #field_type:
                    #support_crate::Subsystem< #subsystem_ctx_name < #consumes>, #error_ty>,
                <#subsystem_ctx_name< #consumes > as #subsystem_ctx_trait>::Sender:
                    #subsystem_sender_trait,
                #subsystem_ctx_name< #consumes >:
                    #subsystem_ctx_trait,
            }
        })
        .fold(TokenStream::new(), |mut ts, addendum| {
            ts.extend(addendum);
            ts
        });

    let mut ts = quote! {
        /// Convenience alias.
        type SubsystemInitFn<T> = Box<dyn FnOnce(#handle) -> ::std::result::Result<T, #error_ty> >;
@@ -332,14 +363,15 @@ pub(crate) fn impl_builder(info: &OverseerInfo) -> proc_macro2::TokenStream {
        }
    }

    impl<S, #( #baggage_generic_ty, )*> #overseer_name <S, #( #baggage_generic_ty, )*> where #spawner_where_clause {
    impl<S #(, #baggage_generic_ty )*> #overseer_name <S #(, #baggage_generic_ty)*>
    where
        #spawner_where_clause,
    {
        /// Create a new overseer utilizing the builder.
        pub fn builder< #( #subsystem_generics),* >() ->
            #builder<Missing<S> #(, Missing<#field_type> )* >
            #builder<Missing<S> #(, Missing< #field_type > )* >
        where
            #(
                #subsystem_generics : Subsystem<#subsystem_ctx_name< #consumes >, #error_ty>,
            )*
            #builder_where_clause
        {
            #builder :: new()
        }
@@ -398,7 +430,8 @@ pub(crate) fn impl_builder(info: &OverseerInfo) -> proc_macro2::TokenStream {

    ts.extend(quote!{
        /// Builder pattern to create compile time safe construction path.
        pub struct #builder <InitStateSpawner, #( #subsystem_passthrough_state_generics, )* #( #baggage_passthrough_state_generics, )*> {
        pub struct #builder <InitStateSpawner, #( #subsystem_passthrough_state_generics, )* #( #baggage_passthrough_state_generics, )*>
        {
            #(
                #subsystem_name: #subsystem_passthrough_state_generics,
            )*
@@ -445,7 +478,7 @@ pub(crate) fn impl_builder(info: &OverseerInfo) -> proc_macro2::TokenStream {
    impl<S, #( #subsystem_passthrough_state_generics, )* #( #baggage_passthrough_state_generics, )*>
        #builder<Missing<S>, #( #subsystem_passthrough_state_generics, )* #( #baggage_passthrough_state_generics, )*>
    where
        #spawner_where_clause
        #spawner_where_clause,
    {
        /// The `spawner` to use for spawning tasks.
        pub fn spawner(self, spawner: S) -> #builder<
@@ -490,6 +523,12 @@ pub(crate) fn impl_builder(info: &OverseerInfo) -> proc_macro2::TokenStream {
        }
    });

    // Create the string literals for spawn.
    let subsystem_name_str_literal = subsystem_name
        .iter()
        .map(|ident| proc_macro2::Literal::string(ident.to_string().replace("_", "-").as_str()))
        .collect::<Vec<_>>();

    ts.extend(quote! {
        /// Type used to represent a builder where all fields are initialized and the overseer could be constructed.
        pub type #initialized_builder<#initialized_builder_generics> = #builder<Init<S>, #( Init<#field_type>, )*>;
@@ -498,9 +537,7 @@ pub(crate) fn impl_builder(info: &OverseerInfo) -> proc_macro2::TokenStream {
    impl<#initialized_builder_generics> #initialized_builder<#initialized_builder_generics>
    where
        #spawner_where_clause,
        #(
            #subsystem_generics : Subsystem<#subsystem_ctx_name< #consumes >, #error_ty>,
        )*
        #builder_where_clause
    {
        /// Complete the construction and create the overseer type.
        pub fn build(self)
@@ -577,17 +614,12 @@ pub(crate) fn impl_builder(info: &OverseerInfo) -> proc_macro2::TokenStream {
                self.signal_capacity.unwrap_or(SIGNAL_CHANNEL_CAPACITY)
            );

            // Generate subsystem name based on overseer field name.
            let subsystem_string = String::from(stringify!(#subsystem_name));
            // Convert owned `snake case` string to a `kebab case` static str.
            let subsystem_static_str = Box::leak(subsystem_string.replace("_", "-").into_boxed_str());

            let ctx = #subsystem_ctx_name::< #consumes >::new(
                signal_rx,
                message_rx,
                channels_out.clone(),
                to_overseer_tx.clone(),
                subsystem_static_str
                #subsystem_name_str_literal
            );

            let #subsystem_name: OverseenSubsystem< #consumes > =
@@ -598,7 +630,7 @@ pub(crate) fn impl_builder(info: &OverseerInfo) -> proc_macro2::TokenStream {
                unbounded_meter,
                ctx,
                #subsystem_name,
                subsystem_static_str,
                #subsystem_name_str_literal,
                &mut running_subsystems,
            )?;
        )*

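The builder generated above relies on a type-state pattern: every field starts out as `Missing<T>` and each setter rewrites exactly one type parameter to `Init<T>`, so `build()` is only callable once everything is set. A minimal hand-written sketch of the same idea (illustrative names, not the generated code):

// Minimal type-state builder sketch; one field stands in for the many the macro handles.
use std::marker::PhantomData;

struct Missing<T>(PhantomData<T>);
struct Init<T>(T);

struct Builder<Sub0> {
    sub0: Sub0,
}

impl Builder<Missing<u8>> {
    fn new() -> Self {
        Builder { sub0: Missing(PhantomData) }
    }
    // Setting the field moves the builder type from `Missing<u8>` to `Init<u8>`.
    fn sub0(self, value: u8) -> Builder<Init<u8>> {
        Builder { sub0: Init(value) }
    }
}

// `build` only exists once every field reached the `Init<_>` state.
impl Builder<Init<u8>> {
    fn build(self) -> u8 {
        self.sub0.0
    }
}

fn demo() {
    let v = Builder::new().sub0(42).build(); // compiles
    // let _ = Builder::new().build(); // would not compile: `build` not defined
    assert_eq!(v, 42);
}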
@@ -65,6 +65,7 @@ pub(crate) fn impl_channels_out_struct(info: &OverseerInfo) -> Result<proc_macro
        signals_received: usize,
        message: #message_wrapper,
    ) {

        let res: ::std::result::Result<_, _> = match message {
            #(
                #message_wrapper :: #consumes_variant ( inner ) => {
@@ -79,6 +80,13 @@ pub(crate) fn impl_channels_out_struct(info: &OverseerInfo) -> Result<proc_macro
            )*
            // dummy message type
            #message_wrapper :: Empty => Ok(()),

            #[allow(unreachable_patterns)]
            // And everything that is not WIP but that no subsystem consumes
            unused_msg => {
                #support_crate :: gum :: warn!("Nothing consumes {:?}", unused_msg);
                Ok(())
            }
        };

        if let Err(subsystem_name) = res {
@@ -110,7 +118,14 @@ pub(crate) fn impl_channels_out_struct(info: &OverseerInfo) -> Result<proc_macro
            #message_wrapper :: #unconsumes_variant ( _ ) => Ok(()),
        )*
        // dummy message type
        #message_wrapper :: Empty => Ok(())
        #message_wrapper :: Empty => Ok(()),

        // And everything that is not WIP but that no subsystem consumes
        #[allow(unreachable_patterns)]
        unused_msg => {
            #support_crate :: gum :: warn!("Nothing consumes {:?}", unused_msg);
            Ok(())
        }
    };

    if let Err(subsystem_name) = res {

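In expanded form, the routing generated here is just a `match` over the wrapper enum that forwards each variant down its subsystem's channel, with a catch-all that only warns. A self-contained illustration of that shape for two imaginary subsystems (not the literal expansion; channel and type names are made up):

// Illustration of the generated routing idea, using plain futures channels.
use futures::{channel::mpsc, SinkExt};

#[derive(Debug)]
enum AllMessages {
    Awesome(u8),
    GoblinTower(&'static str),
    Empty,
}

async fn route(
    message: AllMessages,
    awesome_tx: &mut mpsc::Sender<u8>,
    goblin_tx: &mut mpsc::Sender<&'static str>,
) {
    let res = match message {
        // each variant goes to the channel of the subsystem consuming it
        AllMessages::Awesome(inner) => awesome_tx.send(inner).await.map_err(|_| "awesome"),
        AllMessages::GoblinTower(inner) => goblin_tx.send(inner).await.map_err(|_| "goblin-tower"),
        // dummy message type
        AllMessages::Empty => Ok(()),
    };
    if let Err(subsystem) = res {
        eprintln!("failed to send message to subsystem {}", subsystem);
    }
}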
@@ -1,70 +0,0 @@
// Copyright 2021 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

use super::*;
use proc_macro2::{Ident, TokenStream};
use quote::quote;
use syn::Path;

pub(crate) fn impl_dispatch(info: &OverseerInfo) -> TokenStream {
    let message_wrapper = &info.message_wrapper;

    let dispatchable_variant = info
        .subsystems()
        .into_iter()
        .filter(|ssf| !ssf.no_dispatch)
        .filter(|ssf| !ssf.wip)
        .map(|ssf| ssf.generic.clone())
        .collect::<Vec<Ident>>();

    let dispatchable_message = info
        .subsystems()
        .into_iter()
        .filter(|ssf| !ssf.no_dispatch)
        .filter(|ssf| !ssf.wip)
        .map(|ssf| ssf.consumes.clone())
        .collect::<Vec<Path>>();

    let mut ts = TokenStream::new();
    if let Some(extern_network_ty) = &info.extern_network_ty.clone() {
        ts.extend(quote! {
            impl #message_wrapper {
                /// Generated dispatch iterator generator.
                pub fn dispatch_iter(extern_msg: #extern_network_ty) -> impl Iterator<Item=Self> + Send {
                    [
                        #(
                            extern_msg
                                // focuses on a `NetworkBridgeEvent< protocol_v1::* >`
                                // TODO do not require this to be hardcoded, either externalize or ...
                                // https://github.com/paritytech/polkadot/issues/3427
                                .focus()
                                .ok()
                                .map(|event| {
                                    #message_wrapper :: #dispatchable_variant (
                                        // the inner type of the enum variant
                                        #dispatchable_message :: from( event )
                                    )
                                }),
                        )*
                    ]
                    .into_iter()
                    .filter_map(|x: Option<_>| x)
                }
            }
        });
    }
    ts
}
@@ -21,7 +21,7 @@ use super::*;

/// Generates the wrapper type enum.
pub(crate) fn impl_message_wrapper_enum(info: &OverseerInfo) -> Result<proc_macro2::TokenStream> {
    let consumes = info.consumes();
    let consumes = info.any_message();
    let consumes_variant = info.variant_names();

    let outgoing = &info.outgoing_ty;
@@ -52,7 +52,8 @@ pub(crate) fn impl_message_wrapper_enum(info: &OverseerInfo) -> Result<proc_macr
    };

    let ts = quote! {
        /// Generated message type wrapper
        /// Generated message type wrapper over all possible messages
        /// used by any subsystem.
        #[allow(missing_docs)]
        #[derive(Debug)]
        pub enum #message_wrapper {

@@ -1,259 +0,0 @@
// Copyright 2021 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

use quote::quote;
use syn::Ident;

use super::*;

/// Implement a builder pattern for the `Overseer`-type,
/// which acts as the gateway to constructing the overseer.
pub(crate) fn impl_misc(info: &OverseerInfo) -> proc_macro2::TokenStream {
    let overseer_name = info.overseer_name.clone();
    let subsystem_sender_name =
        Ident::new(&(overseer_name.to_string() + "SubsystemSender"), overseer_name.span());
    let subsystem_ctx_name =
        Ident::new(&(overseer_name.to_string() + "SubsystemContext"), overseer_name.span());
    let consumes = &info.consumes();
    let signal = &info.extern_signal_ty;
    let wrapper_message = &info.message_wrapper;
    let error_ty = &info.extern_error_ty;
    let support_crate = info.support_crate_name();

    let ts = quote! {
        /// Connector to send messages towards all subsystems,
        /// while tracking the which signals where already received.
        #[derive(Debug, Clone)]
        pub struct #subsystem_sender_name {
            /// Collection of channels to all subsystems.
            channels: ChannelsOut,
            /// Systemwide tick for which signals were received by all subsystems.
            signals_received: SignalsReceived,
        }

        /// implementation for wrapping message type...
        #[#support_crate ::async_trait]
        impl SubsystemSender< #wrapper_message > for #subsystem_sender_name {
            async fn send_message(&mut self, msg: #wrapper_message) {
                self.channels.send_and_log_error(self.signals_received.load(), msg).await;
            }

            async fn send_messages<T>(&mut self, msgs: T)
            where
                T: IntoIterator<Item = #wrapper_message> + Send,
                T::IntoIter: Send,
            {
                // This can definitely be optimized if necessary.
                for msg in msgs {
                    self.send_message(msg).await;
                }
            }

            fn send_unbounded_message(&mut self, msg: #wrapper_message) {
                self.channels.send_unbounded_and_log_error(self.signals_received.load(), msg);
            }
        }

        // ... but also implement for all individual messages to avoid
        // the necessity for manual wrapping, and do the conversion
        // based on the generated `From::from` impl for the individual variants.
        #(
            #[#support_crate ::async_trait]
            impl SubsystemSender< #consumes > for #subsystem_sender_name {
                async fn send_message(&mut self, msg: #consumes) {
                    self.channels.send_and_log_error(self.signals_received.load(), #wrapper_message ::from ( msg )).await;
                }

                async fn send_messages<T>(&mut self, msgs: T)
                where
                    T: IntoIterator<Item = #consumes> + Send,
                    T::IntoIter: Send,
                {
                    // This can definitely be optimized if necessary.
                    for msg in msgs {
                        self.send_message(msg).await;
                    }
                }

                fn send_unbounded_message(&mut self, msg: #consumes) {
                    self.channels.send_unbounded_and_log_error(self.signals_received.load(), #wrapper_message ::from ( msg ));
                }
            }
        )*

        /// A context type that is given to the [`Subsystem`] upon spawning.
        /// It can be used by [`Subsystem`] to communicate with other [`Subsystem`]s
        /// or to spawn it's [`SubsystemJob`]s.
        ///
        /// [`Overseer`]: struct.Overseer.html
        /// [`Subsystem`]: trait.Subsystem.html
        /// [`SubsystemJob`]: trait.SubsystemJob.html
        #[derive(Debug)]
        #[allow(missing_docs)]
        pub struct #subsystem_ctx_name<M>{
            signals: #support_crate ::metered::MeteredReceiver< #signal >,
            messages: SubsystemIncomingMessages<M>,
            to_subsystems: #subsystem_sender_name,
            to_overseer: #support_crate ::metered::UnboundedMeteredSender<
                #support_crate ::ToOverseer
            >,
            signals_received: SignalsReceived,
            pending_incoming: Option<(usize, M)>,
            name: &'static str
        }

        impl<M> #subsystem_ctx_name<M> {
            /// Create a new context.
            fn new(
                signals: #support_crate ::metered::MeteredReceiver< #signal >,
                messages: SubsystemIncomingMessages<M>,
                to_subsystems: ChannelsOut,
                to_overseer: #support_crate ::metered::UnboundedMeteredSender<#support_crate:: ToOverseer>,
                name: &'static str
            ) -> Self {
                let signals_received = SignalsReceived::default();
                #subsystem_ctx_name {
                    signals,
                    messages,
                    to_subsystems: #subsystem_sender_name {
                        channels: to_subsystems,
                        signals_received: signals_received.clone(),
                    },
                    to_overseer,
                    signals_received,
                    pending_incoming: None,
                    name
                }
            }

            fn name(&self) -> &'static str {
                self.name
            }
        }

        #[#support_crate ::async_trait]
        impl<M: std::fmt::Debug + Send + 'static> #support_crate ::SubsystemContext for #subsystem_ctx_name<M>
        where
            #subsystem_sender_name: #support_crate ::SubsystemSender< #wrapper_message >,
            #wrapper_message: From<M>,
        {
            type Message = M;
            type Signal = #signal;
            type Sender = #subsystem_sender_name;
            type AllMessages = #wrapper_message;
            type Error = #error_ty;

            async fn try_recv(&mut self) -> ::std::result::Result<Option<FromOverseer<M, #signal>>, ()> {
                match #support_crate ::poll!(self.recv()) {
                    #support_crate ::Poll::Ready(msg) => Ok(Some(msg.map_err(|_| ())?)),
                    #support_crate ::Poll::Pending => Ok(None),
                }
            }

            async fn recv(&mut self) -> ::std::result::Result<FromOverseer<M, #signal>, #error_ty> {
                loop {
                    // If we have a message pending an overseer signal, we only poll for signals
                    // in the meantime.
                    if let Some((needs_signals_received, msg)) = self.pending_incoming.take() {
                        if needs_signals_received <= self.signals_received.load() {
                            return Ok(#support_crate ::FromOverseer::Communication { msg });
                        } else {
                            self.pending_incoming = Some((needs_signals_received, msg));

                            // wait for next signal.
                            let signal = self.signals.next().await
                                .ok_or(#support_crate ::OverseerError::Context(
                                    "Signal channel is terminated and empty."
                                    .to_owned()
                                ))?;

                            self.signals_received.inc();
                            return Ok(#support_crate ::FromOverseer::Signal(signal))
                        }
                    }

                    let mut await_message = self.messages.next().fuse();
                    let mut await_signal = self.signals.next().fuse();
                    let signals_received = self.signals_received.load();
                    let pending_incoming = &mut self.pending_incoming;

                    // Otherwise, wait for the next signal or incoming message.
                    let from_overseer = #support_crate ::futures::select_biased! {
                        signal = await_signal => {
                            let signal = signal
                                .ok_or(#support_crate ::OverseerError::Context(
                                    "Signal channel is terminated and empty."
                                    .to_owned()
                                ))?;

                            #support_crate ::FromOverseer::Signal(signal)
                        }
                        msg = await_message => {
                            let packet = msg
                                .ok_or(#support_crate ::OverseerError::Context(
                                    "Message channel is terminated and empty."
                                    .to_owned()
                                ))?;

                            if packet.signals_received > signals_received {
                                // wait until we've received enough signals to return this message.
                                *pending_incoming = Some((packet.signals_received, packet.message));
                                continue;
                            } else {
                                // we know enough to return this message.
                                #support_crate ::FromOverseer::Communication { msg: packet.message}
                            }
                        }
                    };

                    if let #support_crate ::FromOverseer::Signal(_) = from_overseer {
                        self.signals_received.inc();
                    }

                    return Ok(from_overseer);
                }
            }

            fn sender(&mut self) -> &mut Self::Sender {
                &mut self.to_subsystems
            }

            fn spawn(&mut self, name: &'static str, s: Pin<Box<dyn Future<Output = ()> + Send>>)
                -> ::std::result::Result<(), #error_ty>
            {
                self.to_overseer.unbounded_send(#support_crate ::ToOverseer::SpawnJob {
                    name,
                    subsystem: Some(self.name()),
                    s,
                }).map_err(|_| #support_crate ::OverseerError::TaskSpawn(name))?;
                Ok(())
            }

            fn spawn_blocking(&mut self, name: &'static str, s: Pin<Box<dyn Future<Output = ()> + Send>>)
                -> ::std::result::Result<(), #error_ty>
            {
                self.to_overseer.unbounded_send(#support_crate ::ToOverseer::SpawnBlockingJob {
                    name,
                    subsystem: Some(self.name()),
                    s,
                }).map_err(|_| #support_crate ::OverseerError::TaskSpawn(name))?;
                Ok(())
            }
        }
    };

    ts
}
@@ -37,7 +37,7 @@ pub(crate) fn impl_overseer_struct(info: &OverseerInfo) -> proc_macro2::TokenStr
        S: #support_crate ::SpawnNamed,
    };
    // TODO add `where ..` clauses for baggage types
    // TODO https://github.com/paritytech/polkadot/issues/3427
    // TODO <https://github.com/paritytech/polkadot/issues/3427>

    let consumes = &info.consumes_without_wip();
    let consumes_variant = &info.variant_names_without_wip();
@@ -148,6 +148,12 @@ pub(crate) fn impl_overseer_struct(info: &OverseerInfo) -> proc_macro2::TokenStr
                #message_wrapper :: #unconsumes_variant ( _ ) => {}
            )*
            #message_wrapper :: Empty => {}

            // And everything that is not WIP but that no subsystem consumes
            #[allow(unreachable_patterns)]
            unused_msg => {
                #support_crate :: gum :: warn!("Nothing consumes {:?}", unused_msg);
            }
        }
        Ok(())
    }

@@ -0,0 +1,712 @@
// Copyright 2022 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

use proc_macro2::TokenStream;
use quote::quote;
use syn::{Ident, Path, Result, Type};

use petgraph::{
    dot::{self, Dot},
    graph::NodeIndex,
    visit::EdgeRef,
    Direction,
};
use std::collections::HashMap;

use super::*;

/// Render a graphviz (aka dot graph) to a file.
fn graphviz(
    graph: &petgraph::Graph<Ident, Path>,
    dest: &mut impl std::io::Write,
) -> std::io::Result<()> {
    let config = &[dot::Config::EdgeNoLabel, dot::Config::NodeNoLabel][..];
    let dot = Dot::with_attr_getters(
        graph,
        config,
        &|_graph, edge| -> String {
            format!(
                r#"label="{}""#,
                edge.weight().get_ident().expect("Must have a trailing identifier. qed")
            )
        },
        &|_graph, (_node_index, subsystem_name)| -> String {
            format!(r#"label="{}""#, subsystem_name,)
        },
    );
    dest.write_all(format!("{:?}", &dot).as_bytes())?;
    Ok(())
}

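The emitted `.dot` file can be rendered with the stock Graphviz toolchain. A minimal sketch, assuming the `dot` binary is on `PATH` (invocation shown from Rust to stay in the document's language):

// Sketch: render an emitted graph with Graphviz's `dot` (assumed installed).
use std::process::Command;

fn render_dot(path: &std::path::Path) -> std::io::Result<()> {
    let status = Command::new("dot")
        .arg("-Tsvg")
        .arg("-O") // writes `<input>.svg` next to the input file
        .arg(path)
        .status()?;
    assert!(status.success(), "graphviz `dot` failed");
    Ok(())
}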
/// Generates all subsystem types and related accumulation traits.
pub(crate) fn impl_subsystem_types_all(info: &OverseerInfo) -> Result<TokenStream> {
    let mut ts = TokenStream::new();

    let overseer_name = &info.overseer_name;
    let span = overseer_name.span();
    let all_messages_wrapper = &info.message_wrapper;
    let support_crate = info.support_crate_name();
    let signal_ty = &info.extern_signal_ty;
    let error_ty = &info.extern_error_ty;

    // create a directed graph with all the subsystems as nodes and the messages as edges
    // key is always the message path, values are node indices in the graph and the subsystem generic identifier
    // store the message path and the source sender, both in the graph as well as identifier
    let mut outgoing_lut = HashMap::<&Path, Vec<(Ident, NodeIndex)>>::with_capacity(128);
    // same for consuming the incoming messages
    let mut consuming_lut = HashMap::<&Path, (Ident, NodeIndex)>::with_capacity(128);

    // Ident = Node = subsystem generic names
    // Path = Edge = messages
    let mut graph = petgraph::Graph::<Ident, Path>::new();

    // prepare the full index of outgoing and source subsystems
    for ssf in info.subsystems() {
        let node_index = graph.add_node(ssf.generic.clone());
        for outgoing in ssf.messages_to_send.iter() {
            outgoing_lut
                .entry(outgoing)
                .or_default()
                .push((ssf.generic.clone(), node_index));
        }
        consuming_lut.insert(&ssf.message_to_consume, (ssf.generic.clone(), node_index));
    }

    for (message_ty, (_consuming_subsystem_ident, consuming_node_index)) in consuming_lut.iter() {
        // match the outgoing ones that were registered above with the consumed message
        if let Some(origin_subsystems) = outgoing_lut.get(message_ty) {
            for (_origin_subsystem_ident, sending_node_index) in origin_subsystems.iter() {
                graph.add_edge(*sending_node_index, *consuming_node_index, (*message_ty).clone());
            }
        }
    }

    // All outgoing edges are now usable to derive everything we need
    for node_index in graph.node_indices() {
        let subsystem_name = graph[node_index].to_string();
        let outgoing_wrapper = Ident::new(&(subsystem_name + "OutgoingMessages"), span);

        // cannot be a hashmap, duplicate keys and sorting required
        // maps outgoing messages to the subsystem that consumes it
        let outgoing_to_consumer = graph
            .edges_directed(node_index, Direction::Outgoing)
            .map(|edge| {
                let message_ty = edge.weight();
                let subsystem_generic_consumer = graph[edge.target()].clone();
                Ok((to_variant(message_ty, span.clone())?, subsystem_generic_consumer))
            })
            .collect::<Result<Vec<(Ident, Ident)>>>()?;

        // Split it for usage with quote
        let outgoing_variant = outgoing_to_consumer.iter().map(|x| x.0.clone()).collect::<Vec<_>>();
        let subsystem_generic = outgoing_to_consumer.into_iter().map(|x| x.1).collect::<Vec<_>>();

        ts.extend(quote! {
            impl ::std::convert::From< #outgoing_wrapper > for #all_messages_wrapper {
                fn from(message: #outgoing_wrapper) -> Self {
                    match message {
                        #(
                            #outgoing_wrapper :: #outgoing_variant ( msg ) => #all_messages_wrapper :: #subsystem_generic ( msg ),
                        )*
                        #outgoing_wrapper :: Empty => #all_messages_wrapper :: Empty,
                        // And everything that is not WIP but that no subsystem consumes
                        #[allow(unreachable_patterns)]
                        unused_msg => {
                            #support_crate :: gum :: warn!("Nothing consumes {:?}", unused_msg);
                            #all_messages_wrapper :: Empty
                        }
                    }
                }
            }
        })
    }

    // Dump the graph to file.
    if cfg!(feature = "graph") {
        let path = std::path::PathBuf::from(env!("OUT_DIR"))
            .join(overseer_name.to_string().to_lowercase() + "-subsystem-messaging.dot");
        if let Err(e) = std::fs::OpenOptions::new()
            .truncate(true)
            .create(true)
            .write(true)
            .open(&path)
            .and_then(|mut f| graphviz(&graph, &mut f))
        {
            eprintln!("Failed to write dot graph to {}: {:?}", path.display(), e);
        } else {
            println!("Wrote dot graph to {}", path.display());
        }
    }

    let subsystem_sender_name = &Ident::new(&(overseer_name.to_string() + "Sender"), span);
    let subsystem_ctx_name = &Ident::new(&(overseer_name.to_string() + "SubsystemContext"), span);
    ts.extend(impl_subsystem_context(info, &subsystem_sender_name, &subsystem_ctx_name));

    ts.extend(impl_associate_outgoing_messages_trait(&all_messages_wrapper));

    ts.extend(impl_subsystem_sender(
        support_crate,
        info.subsystems().iter().map(|ssf| {
            let outgoing_wrapper =
                Ident::new(&(ssf.generic.to_string() + "OutgoingMessages"), span);
            outgoing_wrapper
        }),
        &all_messages_wrapper,
        &subsystem_sender_name,
    ));

    // Create all subsystem specific types, one by one
    for ssf in info.subsystems() {
        let subsystem_name = ssf.generic.to_string();
        let outgoing_wrapper = &Ident::new(&(subsystem_name.clone() + "OutgoingMessages"), span);

        let subsystem_ctx_trait = &Ident::new(&(subsystem_name.clone() + "ContextTrait"), span);
        let subsystem_sender_trait = &Ident::new(&(subsystem_name.clone() + "SenderTrait"), span);

        ts.extend(impl_per_subsystem_helper_traits(
            info,
            subsystem_ctx_name,
            subsystem_ctx_trait,
            subsystem_sender_name,
            subsystem_sender_trait,
            &ssf.message_to_consume,
            &ssf.messages_to_send,
            outgoing_wrapper,
        ));

        ts.extend(impl_associate_outgoing_messages(&ssf.message_to_consume, &outgoing_wrapper));

        ts.extend(impl_wrapper_enum(&outgoing_wrapper, ssf.messages_to_send.as_slice())?);
    }

    // impl the empty tuple handling for tests
    let empty_tuple: Type = parse_quote! { () };
    ts.extend(impl_subsystem_context_trait_for(
        empty_tuple.clone(),
        &[],
        empty_tuple,
        all_messages_wrapper,
        subsystem_ctx_name,
        subsystem_sender_name,
        support_crate,
        signal_ty,
        error_ty,
    ));

    Ok(ts)
}

/// Extract the final component of the message type path as used in the `#[subsystem(consumes: path::to::Foo)]` annotation.
fn to_variant(path: &Path, span: Span) -> Result<Ident> {
    let ident = path
        .segments
        .last()
        .ok_or_else(|| syn::Error::new(span, "Path is empty, but it must end with an identifier"))
        .map(|segment| segment.ident.clone())?;
    Ok(ident)
}

/// Converts the outgoing message types to variants.
///
/// Note: Commonly this is `${X}Message` becomes `${X}OutgoingMessages::${X}Message`
/// where for `AllMessages` it would be `AllMessages::${X}`.
fn to_variants(message_types: &[Path], span: Span) -> Result<Vec<Ident>> {
    let variants: Vec<_> =
        Result::from_iter(message_types.into_iter().map(|path| to_variant(path, span.clone())))?;
    Ok(variants)
}

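In other words, only the trailing path segment survives the mapping. A quick self-contained check of that behavior with a couple of hypothetical message paths:

// Sketch: what `to_variant` effectively computes (hypothetical paths).
use syn::{parse_quote, Path};

fn trailing_ident(path: &Path) -> Option<String> {
    path.segments.last().map(|segment| segment.ident.to_string())
}

fn main() {
    let long: Path = parse_quote! { messages::network::NetworkBridgeMessage };
    let short: Path = parse_quote! { CandidateValidationMessage };
    assert_eq!(trailing_ident(&long).as_deref(), Some("NetworkBridgeMessage"));
    assert_eq!(trailing_ident(&short).as_deref(), Some("CandidateValidationMessage"));
}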
/// Generates the wrapper type enum, no bells or whistles.
pub(crate) fn impl_wrapper_enum(wrapper: &Ident, message_types: &[Path]) -> Result<TokenStream> {
    // The message types are path based, each of them must finish with a type
    // and as such we do this upfront.
    let variants = to_variants(message_types, wrapper.span())?;

    let ts = quote! {
        #[allow(missing_docs)]
        #[derive(Debug)]
        pub enum #wrapper {
            #(
                #variants ( #message_types ),
            )*
            Empty,
        }

        #(
            impl ::std::convert::From< #message_types > for #wrapper {
                fn from(message: #message_types) -> Self {
                    #wrapper :: #variants ( message )
                }
            }
        )*

        // Useful for unit and integration tests:
        impl ::std::convert::From< () > for #wrapper {
            fn from(_message: ()) -> Self {
                #wrapper :: Empty
            }
        }
    };
    Ok(ts)
}

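For a subsystem declared with `sends: [FooMessage, BarMessage]` (hypothetical message types, shown with stub definitions), the generated wrapper would look roughly like this:

// Rough shape of the generated wrapper; illustrative only.
#[derive(Debug)]
pub struct FooMessage;
#[derive(Debug)]
pub struct BarMessage;

#[allow(missing_docs)]
#[derive(Debug)]
pub enum ExampleOutgoingMessages {
    FooMessage(FooMessage),
    BarMessage(BarMessage),
    Empty,
}

impl From<FooMessage> for ExampleOutgoingMessages {
    fn from(message: FooMessage) -> Self {
        ExampleOutgoingMessages::FooMessage(message)
    }
}

impl From<BarMessage> for ExampleOutgoingMessages {
    fn from(message: BarMessage) -> Self {
        ExampleOutgoingMessages::BarMessage(message)
    }
}

// Useful for unit and integration tests:
impl From<()> for ExampleOutgoingMessages {
    fn from(_message: ()) -> Self {
        ExampleOutgoingMessages::Empty
    }
}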
/// Create the subsystem sender type and implements `trait SubsystemSender`
/// for the `#outgoing_wrappers: From<OutgoingMessage>` with the proper associated types.
pub(crate) fn impl_subsystem_sender(
    support_crate: &Path,
    outgoing_wrappers: impl IntoIterator<Item = Ident>,
    all_messages_wrapper: &Ident,
    subsystem_sender_name: &Ident,
) -> TokenStream {
    let mut ts = quote! {
        /// Connector to send messages towards all subsystems,
        /// while tracking which signals were already received.
        #[derive(Debug)]
        pub struct #subsystem_sender_name < OutgoingWrapper > {
            /// Collection of channels to all subsystems.
            channels: ChannelsOut,
            /// Systemwide tick for which signals were received by all subsystems.
            signals_received: SignalsReceived,
            /// Keep that marker around.
            _phantom: ::core::marker::PhantomData< OutgoingWrapper >,
        }

        // can't derive due to `PhantomData` and `OutgoingWrapper` not being
        // bounded by `Clone`.
        impl<OutgoingWrapper> std::clone::Clone for #subsystem_sender_name < OutgoingWrapper > {
            fn clone(&self) -> Self {
                Self {
                    channels: self.channels.clone(),
                    signals_received: self.signals_received.clone(),
                    _phantom: ::core::marker::PhantomData::default(),
                }
            }
        }
    };

    // Create the same for a wrapping enum:
    //
    // 1. subsystem specific `*OutgoingMessages`-type
    // 2. overseer-global-`AllMessages`-type
    let wrapped = |outgoing_wrapper: &TokenStream| {
        quote! {
            #[#support_crate ::async_trait]
            impl<OutgoingMessage> SubsystemSender< OutgoingMessage > for #subsystem_sender_name < #outgoing_wrapper >
            where
                OutgoingMessage: Send + 'static,
                #outgoing_wrapper: ::std::convert::From<OutgoingMessage> + Send,
                #all_messages_wrapper: ::std::convert::From< #outgoing_wrapper > + Send,
            {
                async fn send_message(&mut self, msg: OutgoingMessage)
                {
                    self.channels.send_and_log_error(
                        self.signals_received.load(),
                        <#all_messages_wrapper as ::std::convert::From<_>> ::from (
                            <#outgoing_wrapper as ::std::convert::From<_>> :: from ( msg )
                        )
                    ).await;
                }

                async fn send_messages<I>(&mut self, msgs: I)
                where
                    I: IntoIterator<Item=OutgoingMessage> + Send,
                    I::IntoIter: Iterator<Item=OutgoingMessage> + Send,
                {
                    for msg in msgs {
                        self.send_message( msg ).await;
                    }
                }

                fn send_unbounded_message(&mut self, msg: OutgoingMessage)
                {
                    self.channels.send_unbounded_and_log_error(
                        self.signals_received.load(),
                        <#all_messages_wrapper as ::std::convert::From<_>> ::from (
                            <#outgoing_wrapper as ::std::convert::From<_>> :: from ( msg )
                        )
                    );
                }
            }
        }
    };

    for outgoing_wrapper in outgoing_wrappers {
        ts.extend(wrapped(&quote! {
            #outgoing_wrapper
        }));
    }

    ts.extend(wrapped(&quote! {
        ()
    }));

    ts
}

/// Define the `trait AssociateOutgoing` and implement it for `#all_messages_wrapper` and `()`.
pub(crate) fn impl_associate_outgoing_messages_trait(all_messages_wrapper: &Ident) -> TokenStream {
    quote! {
        /// Binds a generated type covering all declared outgoing messages,
        /// which implements `#generated_outgoing: From<M>` for all annotated types
        /// of a particular subsystem.
        ///
        /// Note: This works because there is a 1:1 relation between consumed messages and subsystems.
        pub trait AssociateOutgoing: ::std::fmt::Debug + Send {
            /// The associated _outgoing_ messages for a subsystem that _consumes_ the message `Self`.
            type OutgoingMessages: Into< #all_messages_wrapper > + ::std::fmt::Debug + Send;
        }

        // Helper for tests, where nothing is ever sent.
        impl AssociateOutgoing for () {
            type OutgoingMessages = ();
        }

        // Helper for tests, allows sending of arbitrary messages given
        // a test context.
        impl AssociateOutgoing for #all_messages_wrapper {
            type OutgoingMessages = #all_messages_wrapper ;
        }
    }
}

/// Implement `AssociateOutgoing` for `#consumes` being handled by a particular subsystem.
///
/// Binds the outgoing messages to the inbound message.
///
/// Note: Works, since there is a 1:1 relation between inbound message type and subsystem declarations.
/// Note: A workaround until default associated types work in `rustc`.
pub(crate) fn impl_associate_outgoing_messages(
    consumes: &Path,
    outgoing_wrapper: &Ident,
) -> TokenStream {
    quote! {
        impl AssociateOutgoing for #outgoing_wrapper {
            type OutgoingMessages = #outgoing_wrapper;
        }

        impl AssociateOutgoing for #consumes {
            type OutgoingMessages = #outgoing_wrapper;
        }
    }
}

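Concretely, for a hypothetical subsystem that consumes `FooMessage` and wraps its declared sends in `FooOutgoingMessages`, the two generated impls pin down the outgoing type from either side (rough shape only; the trait is the `AssociateOutgoing` defined above):

// Rough shape of the generated impls for one subsystem (hypothetical types).
impl AssociateOutgoing for FooOutgoingMessages {
    type OutgoingMessages = FooOutgoingMessages;
}

impl AssociateOutgoing for FooMessage {
    type OutgoingMessages = FooOutgoingMessages;
}

// This is what lets a context generic over `M: AssociateOutgoing` name the
// matching sender type: `Sender< <M as AssociateOutgoing>::OutgoingMessages >`.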
/// Implement `trait SubsystemContext` for a particular subsystem context,
/// that is generated by the proc-macro too.
pub(crate) fn impl_subsystem_context_trait_for(
    consumes: Type,
    outgoing: &[Type],
    outgoing_wrapper: Type,
    all_messages_wrapper: &Ident,
    subsystem_ctx_name: &Ident,
    subsystem_sender_name: &Ident,
    support_crate: &Path,
    signal: &Path,
    error_ty: &Path,
) -> TokenStream {
    // impl the subsystem context trait
    let where_clause = quote! {
        #consumes: AssociateOutgoing + ::std::fmt::Debug + Send + 'static,
        #all_messages_wrapper: From< #outgoing_wrapper >,
        #all_messages_wrapper: From< #consumes >,
        #outgoing_wrapper: #( From< #outgoing > )+*,
    };

    quote! {
        #[#support_crate ::async_trait]
        impl #support_crate ::SubsystemContext for #subsystem_ctx_name < #consumes >
        where
            #where_clause
        {
            type Message = #consumes;
            type Signal = #signal;
            type OutgoingMessages = #outgoing_wrapper;
            type Sender = #subsystem_sender_name < #outgoing_wrapper >;
            type Error = #error_ty;

            async fn try_recv(&mut self) -> ::std::result::Result<Option<FromOverseer< Self::Message, #signal>>, ()> {
                match #support_crate ::poll!(self.recv()) {
                    #support_crate ::Poll::Ready(msg) => Ok(Some(msg.map_err(|_| ())?)),
                    #support_crate ::Poll::Pending => Ok(None),
                }
            }

            async fn recv(&mut self) -> ::std::result::Result<FromOverseer<Self::Message, #signal>, #error_ty> {
                loop {
                    // If we have a message pending an overseer signal, we only poll for signals
                    // in the meantime.
                    if let Some((needs_signals_received, msg)) = self.pending_incoming.take() {
                        if needs_signals_received <= self.signals_received.load() {
                            return Ok( #support_crate ::FromOverseer::Communication { msg });
                        } else {
                            self.pending_incoming = Some((needs_signals_received, msg));

                            // wait for next signal.
                            let signal = self.signals.next().await
                                .ok_or(#support_crate ::OverseerError::Context(
                                    "Signal channel is terminated and empty."
                                    .to_owned()
                                ))?;

                            self.signals_received.inc();
                            return Ok( #support_crate ::FromOverseer::Signal(signal))
                        }
                    }

                    let mut await_message = self.messages.next().fuse();
                    let mut await_signal = self.signals.next().fuse();
                    let signals_received = self.signals_received.load();
                    let pending_incoming = &mut self.pending_incoming;

                    // Otherwise, wait for the next signal or incoming message.
                    let from_overseer = #support_crate ::futures::select_biased! {
                        signal = await_signal => {
                            let signal = signal
                                .ok_or( #support_crate ::OverseerError::Context(
                                    "Signal channel is terminated and empty."
                                    .to_owned()
                                ))?;

                            #support_crate ::FromOverseer::Signal(signal)
                        }
                        msg = await_message => {
                            let packet = msg
                                .ok_or( #support_crate ::OverseerError::Context(
                                    "Message channel is terminated and empty."
                                    .to_owned()
                                ))?;

                            if packet.signals_received > signals_received {
                                // wait until we've received enough signals to return this message.
                                *pending_incoming = Some((packet.signals_received, packet.message));
                                continue;
                            } else {
                                // we know enough to return this message.
                                #support_crate ::FromOverseer::Communication { msg: packet.message}
                            }
                        }
                    };

                    if let #support_crate ::FromOverseer::Signal(_) = from_overseer {
                        self.signals_received.inc();
                    }

                    return Ok(from_overseer);
                }
            }

            fn sender(&mut self) -> &mut Self::Sender {
                &mut self.to_subsystems
            }

            fn spawn(&mut self, name: &'static str, s: Pin<Box<dyn Future<Output = ()> + Send>>)
                -> ::std::result::Result<(), #error_ty>
            {
                self.to_overseer.unbounded_send(#support_crate ::ToOverseer::SpawnJob {
                    name,
                    subsystem: Some(self.name()),
                    s,
                }).map_err(|_| #support_crate ::OverseerError::TaskSpawn(name))?;
                Ok(())
            }

            fn spawn_blocking(&mut self, name: &'static str, s: Pin<Box<dyn Future<Output = ()> + Send>>)
                -> ::std::result::Result<(), #error_ty>
            {
                self.to_overseer.unbounded_send(#support_crate ::ToOverseer::SpawnBlockingJob {
                    name,
                    subsystem: Some(self.name()),
                    s,
                }).map_err(|_| #support_crate ::OverseerError::TaskSpawn(name))?;
                Ok(())
            }
        }
    }
}

/// Implement the additional subsystem accumulation traits, for simplified usage,
/// i.e. `${Subsystem}SenderTrait` and `${Subsystem}ContextTrait`.
pub(crate) fn impl_per_subsystem_helper_traits(
    info: &OverseerInfo,
    subsystem_ctx_name: &Ident,
    subsystem_ctx_trait: &Ident,
    subsystem_sender_name: &Ident,
    subsystem_sender_trait: &Ident,
    consumes: &Path,
    outgoing: &[Path],
    outgoing_wrapper: &Ident,
) -> TokenStream {
    let all_messages_wrapper = &info.message_wrapper;
    let signal_ty = &info.extern_signal_ty;
    let error_ty = &info.extern_error_ty;
    let support_crate = info.support_crate_name();

    let mut ts = TokenStream::new();

    // Create a helper trait bound of all outgoing messages, and the generated wrapper type
    // for ease of use within subsystems:
    let acc_sender_trait_bounds = quote! {
        #support_crate ::SubsystemSender< #outgoing_wrapper >
        #(
            + #support_crate ::SubsystemSender< #outgoing >
        )*
        + #support_crate ::SubsystemSender< () >
        + Send
        + 'static
    };

    ts.extend(quote! {
        /// An abstracting trait for usage with subsystems.
        pub trait #subsystem_sender_trait : #acc_sender_trait_bounds
        {}

        impl<T> #subsystem_sender_trait for T
        where
            T: #acc_sender_trait_bounds
        {}
    });

    // Create a helper accumulated per subsystem trait bound:
    let where_clause = quote! {
        #consumes: AssociateOutgoing + ::std::fmt::Debug + Send + 'static,
        #all_messages_wrapper: From< #outgoing_wrapper >,
        #all_messages_wrapper: From< #consumes >,
        #all_messages_wrapper: From< () >,
        #outgoing_wrapper: #( From< #outgoing > )+*,
        #outgoing_wrapper: From< () >,
    };

    ts.extend(quote! {
        /// Accumulative trait for a particular subsystem wrapper.
        pub trait #subsystem_ctx_trait : SubsystemContext <
            Message = #consumes,
            Signal = #signal_ty,
            OutgoingMessages = #outgoing_wrapper,
            // Sender,
            Error = #error_ty,
        >
        where
            #where_clause
            <Self as SubsystemContext>::Sender:
                #subsystem_sender_trait
                + #acc_sender_trait_bounds,
        {
            /// Sender.
            type Sender: #subsystem_sender_trait;
        }

        impl<T> #subsystem_ctx_trait for T
        where
            T: SubsystemContext <
                Message = #consumes,
                Signal = #signal_ty,
                OutgoingMessages = #outgoing_wrapper,
                // Sender
                Error = #error_ty,
            >,
            #where_clause
            <T as SubsystemContext>::Sender:
                #subsystem_sender_trait
                + #acc_sender_trait_bounds,
        {
            type Sender = <T as SubsystemContext>::Sender;
        }
    });

    ts.extend(impl_subsystem_context_trait_for(
        parse_quote! { #consumes },
        &Vec::from_iter(outgoing.iter().map(|path| {
            parse_quote! { #path }
        })),
        parse_quote! { #outgoing_wrapper },
        all_messages_wrapper,
        subsystem_ctx_name,
        subsystem_sender_name,
        support_crate,
        signal_ty,
        error_ty,
    ));
    ts
}

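These accumulated traits are what subsystem authors write against, and they are the mechanism behind "send only declared messages": only types listed under `sends: [..]` satisfy the bounds, so an undeclared send fails at compile time instead of at runtime. A minimal sketch, assuming a subsystem `Foo` with a generated `FooSenderTrait` and a declared outgoing message `BarMessage` (both hypothetical here):

// Sketch: a helper bounded by the generated accumulation trait.
// `FooSenderTrait` implies `SubsystemSender<BarMessage> + ...` for every
// declared message; sending anything undeclared would not type-check.
async fn answer_request<Sender>(sender: &mut Sender)
where
    Sender: FooSenderTrait,
{
    sender.send_message(BarMessage::default()).await;
}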
/// Generate the subsystem context type and provide `fn new` on it.
///
/// Note: The generated `fn new` is used by the [builder pattern](../impl_builder.rs).
pub(crate) fn impl_subsystem_context(
    info: &OverseerInfo,
    subsystem_sender_name: &Ident,
    subsystem_ctx_name: &Ident,
) -> TokenStream {
    let signal_ty = &info.extern_signal_ty;
    let support_crate = info.support_crate_name();

    let ts = quote! {
        /// A context type that is given to the [`Subsystem`] upon spawning.
        /// It can be used by [`Subsystem`] to communicate with other [`Subsystem`]s
        /// or to spawn its [`SubsystemJob`]s.
        ///
        /// [`Overseer`]: struct.Overseer.html
        /// [`Subsystem`]: trait.Subsystem.html
        /// [`SubsystemJob`]: trait.SubsystemJob.html
        #[derive(Debug)]
        #[allow(missing_docs)]
        pub struct #subsystem_ctx_name<M: AssociateOutgoing + Send + 'static> {
            signals: #support_crate ::metered::MeteredReceiver< #signal_ty >,
            messages: SubsystemIncomingMessages< M >,
            to_subsystems: #subsystem_sender_name < <M as AssociateOutgoing>::OutgoingMessages >,
            to_overseer: #support_crate ::metered::UnboundedMeteredSender<
                #support_crate ::ToOverseer
            >,
            signals_received: SignalsReceived,
            pending_incoming: Option<(usize, M)>,
            name: &'static str
        }

        impl<M> #subsystem_ctx_name <M>
        where
            M: AssociateOutgoing + Send + 'static,
        {
            /// Create a new context.
            fn new(
                signals: #support_crate ::metered::MeteredReceiver< #signal_ty >,
                messages: SubsystemIncomingMessages< M >,
                to_subsystems: ChannelsOut,
                to_overseer: #support_crate ::metered::UnboundedMeteredSender<#support_crate:: ToOverseer>,
                name: &'static str
            ) -> Self {
                let signals_received = SignalsReceived::default();
                #subsystem_ctx_name :: <M> {
                    signals,
                    messages,
                    to_subsystems: #subsystem_sender_name :: < <M as AssociateOutgoing>::OutgoingMessages > {
                        channels: to_subsystems,
                        signals_received: signals_received.clone(),
                        _phantom: ::core::marker::PhantomData::default(),
                    },
                    to_overseer,
                    signals_received,
                    pending_incoming: None,
                    name
                }
            }

            fn name(&self) -> &'static str {
                self.name
            }
        }
    };

    ts
}
@@ -14,33 +14,44 @@
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
#![deny(unused_crate_dependencies)]
|
||||
|
||||
use proc_macro2::{Ident, Span, TokenStream};
|
||||
use quote::{quote, ToTokens};
|
||||
use syn::{parse2, Result};
|
||||
use syn::{parse_quote, spanned::Spanned, Path};
|
||||
|
||||
mod impl_builder;
|
||||
mod impl_channels_out;
|
||||
mod impl_dispatch;
|
||||
mod impl_message_wrapper;
|
||||
mod impl_misc;
|
||||
mod impl_overseer;
|
||||
mod parse_attr;
|
||||
mod parse_struct;
|
||||
|
||||
use impl_builder::*;
|
||||
use impl_channels_out::*;
|
||||
use impl_dispatch::*;
|
||||
use impl_message_wrapper::*;
|
||||
use impl_misc::*;
|
||||
use impl_overseer::*;
|
||||
use parse_attr::*;
|
||||
use parse_struct::*;
|
||||
mod impl_subsystem_ctx_sender;
|
||||
mod overseer;
|
||||
mod parse;
|
||||
mod subsystem;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
use impl_builder::*;
|
||||
use impl_channels_out::*;
|
||||
use impl_message_wrapper::*;
|
||||
use impl_overseer::*;
|
||||
use impl_subsystem_ctx_sender::*;
|
||||
use parse::*;
|
||||
|
||||
use self::{overseer::*, subsystem::*};
|
||||
|
/// Obtain the support crate `Path`.
pub(crate) fn support_crate() -> Result<Path, proc_macro_crate::Error> {
	Ok(if cfg!(test) {
		parse_quote! {crate}
	} else {
		use proc_macro_crate::{crate_name, FoundCrate};
		let crate_name = crate_name("polkadot-overseer-gen")?;
		match crate_name {
			FoundCrate::Itself => parse_quote! {crate},
			FoundCrate::Name(name) => Ident::new(&name, Span::call_site()).into(),
		}
	})
}
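
A hypothetical call site for `support_crate()` when emitting code; `FooSubsystem` and `Error` are placeholder tokens, not identifiers from this crate:

fn emit_example_impl() -> proc_macro2::TokenStream {
	let support_crate: syn::Path =
		support_crate().expect("present in `Cargo.toml` of the calling crate");
	// The resolved path is spliced in so the generated code resolves correctly
	// whether the macro expands inside the support crate or a downstream crate.
	quote::quote! {
		impl #support_crate::Subsystem<Context, Error> for FooSubsystem {}
	}
}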

#[proc_macro_attribute]
pub fn overlord(
	attr: proc_macro::TokenStream,
@@ -53,58 +64,26 @@ pub fn overlord(
		.into()
}

pub(crate) fn impl_overseer_gen(
	attr: TokenStream,
	orig: TokenStream,
) -> Result<proc_macro2::TokenStream> {
	let args: AttrArgs = parse2(attr)?;
	let message_wrapper = args.message_wrapper;

	let of: OverseerGuts = parse2(orig)?;

	let support_crate_name = if cfg!(test) {
		quote! {crate}
	} else {
		use proc_macro_crate::{crate_name, FoundCrate};
		let crate_name = crate_name("polkadot-overseer-gen")
			.expect("Support crate polkadot-overseer-gen is present in `Cargo.toml`. qed");
		match crate_name {
			FoundCrate::Itself => quote! {crate},
			FoundCrate::Name(name) => Ident::new(&name, Span::call_site()).to_token_stream(),
		}
	};
	let info = OverseerInfo {
		support_crate_name,
		subsystems: of.subsystems,
		baggage: of.baggage,
		overseer_name: of.name,
		message_wrapper,
		message_channel_capacity: args.message_channel_capacity,
		signal_channel_capacity: args.signal_channel_capacity,
		extern_event_ty: args.extern_event_ty,
		extern_signal_ty: args.extern_signal_ty,
		extern_error_ty: args.extern_error_ty,
		extern_network_ty: args.extern_network_ty,
		outgoing_ty: args.outgoing_ty,
	};

	let mut additive = impl_overseer_struct(&info);
	additive.extend(impl_builder(&info));

	additive.extend(impl_overseen_subsystem(&info));
	additive.extend(impl_channels_out_struct(&info));
	additive.extend(impl_misc(&info));

	additive.extend(impl_message_wrapper_enum(&info)?);
	additive.extend(impl_dispatch(&info));

	let ts = expander::Expander::new("overlord-expansion")
		.add_comment("Generated overseer code by `#[overlord(..)]`".to_owned())
		.dry(!cfg!(feature = "expand"))
		.verbose(false)
		.fmt(expander::Edition::_2021)
		.write_to_out_dir(additive)
		.expect("Expander does not fail due to IO in OUT_DIR. qed");

	Ok(ts)
#[proc_macro_attribute]
pub fn subsystem(
	attr: proc_macro::TokenStream,
	item: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
	let attr: TokenStream = attr.into();
	let item: TokenStream = item.into();
	impl_subsystem_context_trait_bounds(attr, item, MakeSubsystem::ImplSubsystemTrait)
		.unwrap_or_else(|err| err.to_compile_error())
		.into()
}

#[proc_macro_attribute]
pub fn contextbounds(
	attr: proc_macro::TokenStream,
	item: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
	let attr: TokenStream = attr.into();
	let item: TokenStream = item.into();
	impl_subsystem_context_trait_bounds(attr, item, MakeSubsystem::AddContextTraitBounds)
		.unwrap_or_else(|err| err.to_compile_error())
		.into()
}

@@ -0,0 +1,67 @@
// Copyright 2022 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

use proc_macro2::TokenStream;
use syn::{parse2, Result};

use super::{parse::*, *};

pub(crate) fn impl_overseer_gen(
	attr: TokenStream,
	orig: TokenStream,
) -> Result<proc_macro2::TokenStream> {
	let args: OverseerAttrArgs = parse2(attr)?;
	let message_wrapper = args.message_wrapper;

	let of: OverseerGuts = parse2(orig)?;

	let support_crate = support_crate().expect("The crate this macro is run for, includes the proc-macro support as dependency, otherwise it could not be run in the first place. qed");
	let info = OverseerInfo {
		support_crate,
		subsystems: of.subsystems,
		baggage: of.baggage,
		overseer_name: of.name,
		message_wrapper,
		message_channel_capacity: args.message_channel_capacity,
		signal_channel_capacity: args.signal_channel_capacity,
		extern_event_ty: args.extern_event_ty,
		extern_signal_ty: args.extern_signal_ty,
		extern_error_ty: args.extern_error_ty,
		outgoing_ty: args.outgoing_ty,
	};

	let mut additive = impl_overseer_struct(&info);
	additive.extend(impl_builder(&info));

	additive.extend(impl_overseen_subsystem(&info));
	additive.extend(impl_channels_out_struct(&info));
	additive.extend(impl_subsystem_types_all(&info)?);

	additive.extend(impl_message_wrapper_enum(&info)?);

	let ts = expander::Expander::new("overlord-expansion")
		.add_comment("Generated overseer code by `#[overlord(..)]`".to_owned())
		.dry(!cfg!(feature = "expand"))
		.verbose(true)
		// once all the format options we need are available on stable,
		// we should enable this again; until then too many warnings
		// are generated
		// .fmt(expander::Edition::_2021)
		.write_to_out_dir(additive)
		.expect("Expander does not fail due to IO in OUT_DIR. qed");

	Ok(ts)
}
@@ -0,0 +1,39 @@
// Copyright 2022 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

mod kw {
	syn::custom_keyword!(event);
	syn::custom_keyword!(signal);
	syn::custom_keyword!(error);
	syn::custom_keyword!(outgoing);
	syn::custom_keyword!(gen);
	syn::custom_keyword!(signal_capacity);
	syn::custom_keyword!(message_capacity);
	syn::custom_keyword!(subsystem);
	syn::custom_keyword!(prefix);
}
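
A minimal sketch of how one of these custom keywords is typically consumed with `syn` (using the `prefix` keyword for illustration, and assuming access to the `kw` module above):

fn parse_prefix_arg(input: syn::parse::ParseStream) -> syn::Result<syn::Path> {
	input.parse::<kw::prefix>()?; // consume the `prefix` keyword
	input.parse::<syn::Token![=]>()?; // consume the `=`
	input.parse::<syn::Path>() // the value, e.g. `somewhere`
}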

mod parse_overseer_attr;
mod parse_overseer_struct;

mod parse_subsystem_attr;

#[cfg(test)]
mod tests;

pub(crate) use self::{parse_overseer_attr::*, parse_overseer_struct::*};

pub(crate) use self::parse_subsystem_attr::*;
+5
-31
@@ -14,6 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

use super::kw;
use proc_macro2::Span;
use quote::{quote, ToTokens};
use std::collections::{hash_map::RandomState, HashMap};
@@ -24,21 +25,9 @@ use syn::{
	Error, Ident, LitInt, Path, Result, Token,
};

mod kw {
	syn::custom_keyword!(event);
	syn::custom_keyword!(signal);
	syn::custom_keyword!(error);
	syn::custom_keyword!(network);
	syn::custom_keyword!(outgoing);
	syn::custom_keyword!(gen);
	syn::custom_keyword!(signal_capacity);
	syn::custom_keyword!(message_capacity);
}

#[derive(Clone, Debug)]
enum OverseerAttrItem {
	ExternEventType { tag: kw::event, eq_token: Token![=], value: Path },
	ExternNetworkType { tag: kw::network, eq_token: Token![=], value: Path },
	ExternOverseerSignalType { tag: kw::signal, eq_token: Token![=], value: Path },
	ExternErrorType { tag: kw::error, eq_token: Token![=], value: Path },
	OutgoingType { tag: kw::outgoing, eq_token: Token![=], value: Path },
@@ -53,9 +42,6 @@ impl ToTokens for OverseerAttrItem {
			Self::ExternEventType { tag, eq_token, value } => {
				quote! { #tag #eq_token, #value }
			},
			Self::ExternNetworkType { tag, eq_token, value } => {
				quote! { #tag #eq_token, #value }
			},
			Self::ExternOverseerSignalType { tag, eq_token, value } => {
				quote! { #tag #eq_token, #value }
			},
@@ -100,12 +86,6 @@ impl Parse for OverseerAttrItem {
				eq_token: input.parse()?,
				value: input.parse()?,
			})
		} else if lookahead.peek(kw::network) {
			Ok(OverseerAttrItem::ExternNetworkType {
				tag: input.parse::<kw::network>()?,
				eq_token: input.parse()?,
				value: input.parse()?,
			})
		} else if lookahead.peek(kw::outgoing) {
			Ok(OverseerAttrItem::OutgoingType {
				tag: input.parse::<kw::outgoing>()?,
@@ -138,15 +118,11 @@ impl Parse for OverseerAttrItem {

/// Attribute arguments
#[derive(Clone, Debug)]
pub(crate) struct AttrArgs {
pub(crate) struct OverseerAttrArgs {
	pub(crate) message_wrapper: Ident,
	pub(crate) extern_event_ty: Path,
	pub(crate) extern_signal_ty: Path,
	pub(crate) extern_error_ty: Path,
	/// An external subsystem that both consumes and produces messages,
	/// but is not part of the band of subsystems; it's a mere proxy
	/// to another entity that consumes/produces messages.
	pub(crate) extern_network_ty: Option<Path>,
	pub(crate) outgoing_ty: Option<Path>,
	pub(crate) signal_channel_capacity: usize,
	pub(crate) message_channel_capacity: usize,
@@ -170,7 +146,7 @@ macro_rules! extract_variant {
	};
}

impl Parse for AttrArgs {
impl Parse for OverseerAttrArgs {
	fn parse(input: &ParseBuffer) -> Result<Self> {
		let items: Punctuated<OverseerAttrItem, Token![,]> =
			input.parse_terminated(OverseerAttrItem::parse)?;
@@ -198,18 +174,16 @@ impl Parse for AttrArgs {

		let error = extract_variant!(unique, ExternErrorType; err = "Must declare the overseer error type via `error=..`.")?;
		let event = extract_variant!(unique, ExternEventType; err = "Must declare the overseer event type via `event=..`.")?;
		let signal = extract_variant!(unique, ExternOverseerSignalType; err = "Must declare the overseer signal type via `span=..`.")?;
		let signal = extract_variant!(unique, ExternOverseerSignalType; err = "Must declare the overseer signal type via `signal=..`.")?;
		let message_wrapper = extract_variant!(unique, MessageWrapperName; err = "Must declare the overseer generated wrapping message type via `gen=..`.")?;
		let network = extract_variant!(unique, ExternNetworkType);
		let outgoing = extract_variant!(unique, OutgoingType);

		Ok(AttrArgs {
		Ok(OverseerAttrArgs {
			signal_channel_capacity,
			message_channel_capacity,
			extern_event_ty: event,
			extern_signal_ty: signal,
			extern_error_ty: error,
			extern_network_ty: network,
			outgoing_ty: outgoing,
			message_wrapper,
		})
+166
-72
@@ -17,32 +17,36 @@
use proc_macro2::{Span, TokenStream};
use std::collections::{hash_map::RandomState, HashMap, HashSet};
use syn::{
	parenthesized,
	parse::{Parse, ParseStream},
	punctuated::Punctuated,
	spanned::Spanned,
	AttrStyle, Attribute, Error, Field, FieldsNamed, GenericParam, Ident, ItemStruct, Path, Result,
	Token, Type, Visibility,
	token::Bracket,
	AttrStyle, Error, Field, FieldsNamed, GenericParam, Ident, ItemStruct, Path, Result, Token,
	Type, Visibility,
};

use quote::{quote, ToTokens};

mod kw {
	syn::custom_keyword!(wip);
	syn::custom_keyword!(no_dispatch);
	syn::custom_keyword!(blocking);
	syn::custom_keyword!(consumes);
	syn::custom_keyword!(sends);
}

#[derive(Clone, Debug)]
enum SubSysAttrItem {
pub(crate) enum SubSysAttrItem {
	/// The subsystem is still a work in progress
	/// and should not be communicated with.
	Wip(kw::wip),
	/// The subsystem is blocking and requires to be
	/// spawned on an exclusive thread.
	Blocking(kw::blocking),
	/// External messages, after being converted, should not be
	/// dispatched to the annotated subsystem.
	NoDispatch(kw::no_dispatch),
	/// Message to be sent by this subsystem.
	Sends(Sends),
	/// Message to be consumed by this subsystem.
	Consumes(Consumes),
}

impl Parse for SubSysAttrItem {
@@ -52,10 +56,10 @@ impl Parse for SubSysAttrItem {
			Self::Wip(input.parse::<kw::wip>()?)
		} else if lookahead.peek(kw::blocking) {
			Self::Blocking(input.parse::<kw::blocking>()?)
		} else if lookahead.peek(kw::no_dispatch) {
			Self::NoDispatch(input.parse::<kw::no_dispatch>()?)
		} else if lookahead.peek(kw::sends) {
			Self::Sends(input.parse::<Sends>()?)
		} else {
			return Err(lookahead.error())
			Self::Consumes(input.parse::<Consumes>()?)
		})
	}
}
@@ -69,8 +73,11 @@ impl ToTokens for SubSysAttrItem {
			Self::Blocking(blocking) => {
				quote! { #blocking }
			},
			Self::NoDispatch(no_dispatch) => {
				quote! { #no_dispatch }
			Self::Sends(_) => {
				quote! {}
			},
			Self::Consumes(_) => {
				quote! {}
			},
		};
		tokens.extend(ts.into_iter());
@@ -78,7 +85,7 @@ impl ToTokens for SubSysAttrItem {
}

/// A field of the struct annotated with
/// `#[subsystem(no_dispatch, , A | B | C)]`
/// `#[subsystem(A, B, C)]`
#[derive(Clone, Debug)]
pub(crate) struct SubSysField {
	/// Name of the field.
@@ -87,11 +94,10 @@ pub(crate) struct SubSysField {
	/// which is also used `#wrapper_message :: #variant` variant
	/// part.
	pub(crate) generic: Ident,
	/// Type to be consumed by the subsystem.
	pub(crate) consumes: Path,
	/// If `no_dispatch` is present, if the message is incoming via
	/// an `extern` `Event`, it will not be dispatched to all subsystems.
	pub(crate) no_dispatch: bool,
	/// Type of message to be consumed by the subsystem.
	pub(crate) message_to_consume: Path,
	/// Types of messages to be sent by the subsystem.
	pub(crate) messages_to_send: Vec<Path>,
	/// If the subsystem implementation is blocking execution and hence
	/// has to be spawned on a separate thread or thread pool.
	pub(crate) blocking: bool,
@@ -115,6 +121,15 @@ macro_rules! extract_variant {
	($unique:expr, $variant:ident ; err = $err:expr) => {
		extract_variant!($unique, $variant).ok_or_else(|| Error::new(Span::call_site(), $err))
	};
	($unique:expr, $variant:ident take) => {
		$unique.values().find_map(|item| {
			if let SubSysAttrItem::$variant(value) = item {
				Some(value.clone())
			} else {
				None
			}
		})
	};
	($unique:expr, $variant:ident) => {
		$unique.values().find_map(|item| {
			if let SubSysAttrItem::$variant(_) = item {
@@ -126,57 +141,113 @@ macro_rules! extract_variant {
	};
}

pub(crate) struct SubSystemTags {
#[derive(Debug, Clone)]
pub(crate) struct Sends {
	#[allow(dead_code)]
	pub(crate) attrs: Vec<Attribute>,
	pub(crate) keyword_sends: kw::sends,
	#[allow(dead_code)]
	pub(crate) no_dispatch: bool,
	/// The subsystem is a work in progress: only generate the `Wrapper` variant, but do not
	/// forward messages to it, and do not include the subsystem in the list of subsystems.
	pub(crate) wip: bool,
	pub(crate) blocking: bool,
	pub(crate) colon: Token![:],
	#[allow(dead_code)]
	pub(crate) bracket: Option<Bracket>,
	pub(crate) sends: Punctuated<Path, Token![,]>,
}

impl Parse for Sends {
	fn parse(input: syn::parse::ParseStream) -> Result<Self> {
		let content;
		let keyword_sends = input.parse()?;
		let colon = input.parse()?;
		let (bracket, sends) = if !input.peek(syn::token::Bracket) {
			let mut sends = Punctuated::new();
			sends.push_value(input.parse::<Path>()?);
			(None, sends)
		} else {
			let bracket = Some(syn::bracketed!(content in input));
			let sends = Punctuated::parse_terminated(&content)?;
			(bracket, sends)
		};
		Ok(Self { keyword_sends, colon, bracket, sends })
	}
}
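
Both spellings the `Sends` parser accepts, as a hypothetical test sketch:

#[test]
fn sends_accepts_single_and_bracketed() {
	// Without brackets: exactly one path.
	let single: Sends = syn::parse_quote! { sends: A };
	assert_eq!(single.sends.len(), 1);
	// With brackets: zero or more comma-separated paths.
	let multi: Sends = syn::parse_quote! { sends: [A, B, C] };
	assert_eq!(multi.sends.len(), 3);
}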

#[derive(Debug, Clone)]
pub(crate) struct Consumes {
	#[allow(dead_code)]
	pub(crate) keyword_consumes: Option<kw::consumes>,
	#[allow(dead_code)]
	pub(crate) colon: Option<Token![:]>,
	pub(crate) consumes: Path,
}

impl Parse for SubSystemTags {
impl Parse for Consumes {
	fn parse(input: syn::parse::ParseStream) -> Result<Self> {
		let attrs = Attribute::parse_outer(input)?;
		let lookahead = input.lookahead1();
		Ok(if lookahead.peek(kw::consumes) {
			Self {
				keyword_consumes: Some(input.parse()?),
				colon: input.parse()?,
				consumes: input.parse()?,
			}
		} else {
			Self { keyword_consumes: None, colon: None, consumes: input.parse()? }
		})
	}
}
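
And the two spellings `Consumes` accepts, again as a hypothetical test sketch:

#[test]
fn consumes_accepts_tagged_and_bare() {
	let tagged: Consumes = syn::parse_quote! { consumes: Foo };
	let bare: Consumes = syn::parse_quote! { Foo };
	assert!(tagged.keyword_consumes.is_some());
	assert!(bare.keyword_consumes.is_none());
}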

/// Parses `(Foo, sends: [Bar, Baz])`
/// including the `(` and `)`.
#[derive(Debug, Clone)]
pub(crate) struct SubSystemAttrItems {
	/// The subsystem is a work in progress: only generate the `Wrapper` variant, but do not
	/// forward messages to it, and do not include the subsystem in the list of subsystems.
	pub(crate) wip: bool,
	/// If there are blocking components in the subsystem and hence it should be
	/// spawned on a dedicated thread pool for such subsystems.
	pub(crate) blocking: bool,
	/// The message type being consumed by the subsystem.
	pub(crate) consumes: Option<Consumes>,
	pub(crate) sends: Option<Sends>,
}

impl Parse for SubSystemAttrItems {
	fn parse(input: syn::parse::ParseStream) -> Result<Self> {
		let span = input.span();

		let input = input;
		let content;
		let _ = syn::parenthesized!(content in input);
		let _paren_token = parenthesized!(content in input);

		let mut items = Punctuated::new();
		while let Ok(tag) = content.call(SubSysAttrItem::parse) {
			items.push_value(tag);
			items.push_punct(content.call(<Token![,]>::parse)?);
		}

		assert!(items.empty_or_trailing(), "Always followed by the message type to consume. qed");

		let consumes = content.parse::<Path>()?;
		let items = content.call(Punctuated::<SubSysAttrItem, Token![,]>::parse_terminated)?;

		let mut unique = HashMap::<
			std::mem::Discriminant<SubSysAttrItem>,
			SubSysAttrItem,
			RandomState,
		>::default();

		for item in items {
			if let Some(first) = unique.insert(std::mem::discriminant(&item), item.clone()) {
				let mut e = Error::new(
					item.span(),
					format!("Duplicate definition of subsystem attribute found"),
				);
				let mut e =
					Error::new(item.span(), "Duplicate definition of subsystem attribute found");
				e.combine(Error::new(first.span(), "previously defined here."));
				return Err(e)
			}
		}

		let no_dispatch = extract_variant!(unique, NoDispatch; default = false);
		// A subsystem makes no sense if neither of them is provided
		let sends = extract_variant!(unique, Sends take);
		let consumes = extract_variant!(unique, Consumes take);
		if sends.as_ref().map(|sends| sends.sends.is_empty()).unwrap_or(true) && consumes.is_none()
		{
			return Err(Error::new(
				span,
				"Must have at least one of `consumes: ..` and `sends: [..]`.",
			))
		}

		let blocking = extract_variant!(unique, Blocking; default = false);
		let wip = extract_variant!(unique, Wip; default = false);

		Ok(Self { attrs, no_dispatch, blocking, consumes, wip })
		Ok(Self { blocking, wip, sends, consumes })
	}
}
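
The duplicate detection above hinges on `std::mem::discriminant`: two enum values count as duplicates when they are the same variant, regardless of payload. A standalone sketch of the idea:

#[test]
fn discriminant_deduplicates_by_variant() {
	use std::collections::HashMap;
	use std::mem::{discriminant, Discriminant};

	#[derive(Clone, Debug)]
	enum Item {
		A(u8),
		B,
	}

	let mut unique: HashMap<Discriminant<Item>, Item> = HashMap::new();
	assert!(unique.insert(discriminant(&Item::A(1)), Item::A(1)).is_none());
	// Same variant with a different payload is still flagged as a duplicate.
	assert!(unique.insert(discriminant(&Item::A(2)), Item::A(2)).is_some());
	assert!(unique.insert(discriminant(&Item::B), Item::B).is_none());
}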

@@ -192,7 +263,7 @@ pub(crate) struct BaggageField {
#[derive(Clone, Debug)]
pub(crate) struct OverseerInfo {
	/// Where the support crate `::polkadot_overseer_gen` lives.
	pub(crate) support_crate_name: TokenStream,
	pub(crate) support_crate: Path,

	/// Fields annotated with `#[subsystem(..)]`.
	pub(crate) subsystems: Vec<SubSysField>,
@@ -216,11 +287,8 @@ pub(crate) struct OverseerInfo {
	/// Incoming event type from the outer world, usually an external framework of some sort.
	pub(crate) extern_event_ty: Path,

	/// Incoming event type from an external entity, commonly from the network.
	pub(crate) extern_network_ty: Option<Path>,

	/// Type of messages that are sent to an external subsystem.
	/// Merely here to be included during generation of `message_wrapper` type.
	/// Merely here to be included during generation of `#message_wrapper` type.
	pub(crate) outgoing_ty: Option<Path>,

	/// Incoming event type from the outer world, commonly from the network.
@@ -228,8 +296,8 @@ pub(crate) struct OverseerInfo {
}

impl OverseerInfo {
	pub(crate) fn support_crate_name(&self) -> &TokenStream {
		&self.support_crate_name
	pub(crate) fn support_crate_name(&self) -> &Path {
		&self.support_crate
	}

	pub(crate) fn variant_names(&self) -> Vec<Ident> {
@@ -297,8 +365,11 @@ impl OverseerInfo {
			.collect::<Vec<_>>()
	}

	pub(crate) fn consumes(&self) -> Vec<Path> {
		self.subsystems.iter().map(|ssf| ssf.consumes.clone()).collect::<Vec<_>>()
	pub(crate) fn any_message(&self) -> Vec<Path> {
		self.subsystems
			.iter()
			.map(|ssf| ssf.message_to_consume.clone())
			.collect::<Vec<_>>()
	}

	pub(crate) fn channel_names_without_wip(&self, suffix: &'static str) -> Vec<Ident> {
@@ -313,7 +384,7 @@ impl OverseerInfo {
		self.subsystems
			.iter()
			.filter(|ssf| !ssf.wip)
			.map(|ssf| ssf.consumes.clone())
			.map(|ssf| ssf.message_to_consume.clone())
			.collect::<Vec<_>>()
	}
}
@@ -341,7 +412,8 @@ impl OverseerGuts {
		// for the builder pattern besides other places.
		let mut unique_subsystem_idents = HashSet::<Ident>::new();
		for Field { attrs, vis, ident, ty, .. } in fields.named.into_iter() {
			let mut consumes =
			// collect all subsystem annotations per field
			let mut subsystem_attr =
				attrs.iter().filter(|attr| attr.style == AttrStyle::Outer).filter_map(|attr| {
					let span = attr.path.span();
					attr.path.get_ident().filter(|ident| *ident == "subsystem").map(move |_ident| {
@@ -349,53 +421,75 @@ impl OverseerGuts {
						(attr_tokens, span)
					})
				});
			let ident =
				ident.ok_or_else(|| Error::new(ty.span(), "Missing identifier for member. BUG"))?;
			let ident = ident.ok_or_else(|| {
				Error::new(
					ty.span(),
					"Missing identifier for field, only named fields are expected.",
				)
			})?;

			if let Some((attr_tokens, span)) = consumes.next() {
				if let Some((_attr_tokens2, span2)) = consumes.next() {
			// a `#[subsystem(..)]` annotation exists
			if let Some((attr_tokens, span)) = subsystem_attr.next() {
				if let Some((_attr_tokens2, span2)) = subsystem_attr.next() {
					return Err({
						let mut err = Error::new(span, "The first subsystem annotation is at");
						err.combine(Error::new(span2, "but another here for the same field."));
						err
					})
				}
				let mut consumes_paths = Vec::with_capacity(attrs.len());

				let span = attr_tokens.span();

				let attr_tokens = attr_tokens.clone();
				let variant: SubSystemTags = syn::parse2(attr_tokens.clone())?;
				consumes_paths.push(variant.consumes);
				let subsystem_attrs: SubSystemAttrItems = syn::parse2(attr_tokens.clone())?;

				let field_ty = try_type_to_path(ty, span)?;
				let generic = field_ty
					.get_ident()
					.ok_or_else(|| {
						Error::new(field_ty.span(), "Must be an identifier, not a path.")
						Error::new(
							field_ty.span(),
							"Must be an identifier, not a path. It will be used as a generic.",
						)
					})?
					.clone();
				// check for unique subsystem name, otherwise we'd create invalid code:
				if let Some(previous) = unique_subsystem_idents.get(&generic) {
					let mut e = Error::new(
						generic.span(),
						format!("Duplicate subsystem names `{}`", generic),
					);
					let mut e = Error::new(generic.span(), "Duplicate subsystem names");
					e.combine(Error::new(previous.span(), "previously defined here."));
					return Err(e)
				}
				unique_subsystem_idents.insert(generic.clone());

				let SubSystemAttrItems { wip, blocking, consumes, sends, .. } = subsystem_attrs;

				// messages to be sent
				let sends = if let Some(sends) = sends {
					Vec::from_iter(sends.sends.iter().cloned())
				} else {
					vec![]
				};
				// messages deemed for consumption
				let consumes = if let Some(consumes) = consumes {
					consumes.consumes
				} else {
					return Err(Error::new(span, "Must provide exactly one consuming message type"))
				};

				subsystems.push(SubSysField {
					name: ident,
					generic,
					consumes: consumes_paths[0].clone(),
					no_dispatch: variant.no_dispatch,
					wip: variant.wip,
					blocking: variant.blocking,
					message_to_consume: consumes,
					messages_to_send: sends,
					wip,
					blocking,
				});
			} else {
				let field_ty = try_type_to_path(ty, ident.span())?;
				let generic = field_ty
					.get_ident()
					.map(|ident| baggage_generics.contains(ident))
					.unwrap_or_default();
					.unwrap_or(false);
				baggage.push(BaggageField { field_name: ident, generic, field_ty, vis });
			}
		}
@@ -0,0 +1,144 @@
// Copyright 2022 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

use super::kw;
use proc_macro2::Span;
use quote::{quote, ToTokens};
use std::collections::{hash_map::RandomState, HashMap};
use syn::{
	parse::{Parse, ParseBuffer},
	punctuated::Punctuated,
	spanned::Spanned,
	Error, Ident, Path, Result, Token,
};

#[derive(Clone, Debug)]
enum SubsystemAttrItem {
	/// Error type provided by the user.
	Error { tag: kw::error, eq_token: Token![=], value: Path },
	/// For which slot in the overseer this should be plugged.
	///
	/// The subsystem implementation can and should have a different name
	/// from the declared parameter type in the overseer.
	Subsystem { tag: Option<kw::subsystem>, eq_token: Option<Token![=]>, value: Ident },
	/// The prefix to apply when a subsystem is implemented in a different file/crate
	/// than the overseer itself.
	///
	/// Important for `#[subsystem(..)]` to reference the traits correctly.
	TraitPrefix { tag: kw::prefix, eq_token: Token![=], value: Path },
}

impl ToTokens for SubsystemAttrItem {
	fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
		let ts = match self {
			Self::TraitPrefix { tag, eq_token, value } => {
				quote! { #tag #eq_token, #value }
			},
			Self::Error { tag, eq_token, value } => {
				quote! { #tag #eq_token, #value }
			},
			Self::Subsystem { tag, eq_token, value } => {
				quote! { #tag #eq_token, #value }
			},
		};
		tokens.extend(ts.into_iter());
	}
}

impl Parse for SubsystemAttrItem {
	fn parse(input: &ParseBuffer) -> Result<Self> {
		let lookahead = input.lookahead1();
		if lookahead.peek(kw::error) {
			Ok(SubsystemAttrItem::Error {
				tag: input.parse::<kw::error>()?,
				eq_token: input.parse()?,
				value: input.parse()?,
			})
		} else if lookahead.peek(kw::prefix) {
			Ok(SubsystemAttrItem::TraitPrefix {
				tag: input.parse::<kw::prefix>()?,
				eq_token: input.parse()?,
				value: input.parse()?,
			})
		} else if lookahead.peek(kw::subsystem) {
			Ok(SubsystemAttrItem::Subsystem {
				tag: Some(input.parse::<kw::subsystem>()?),
				eq_token: Some(input.parse()?),
				value: input.parse()?,
			})
		} else {
			Ok(SubsystemAttrItem::Subsystem { tag: None, eq_token: None, value: input.parse()? })
		}
	}
}

/// Attribute arguments `$args` in `#[subsystem( $args )]`.
#[derive(Clone, Debug)]
pub(crate) struct SubsystemAttrArgs {
	span: Span,
	pub(crate) error_path: Option<Path>,
	pub(crate) subsystem_ident: Ident,
	pub(crate) trait_prefix_path: Option<Path>,
}

impl Spanned for SubsystemAttrArgs {
	fn span(&self) -> Span {
		self.span.clone()
	}
}

macro_rules! extract_variant {
	($unique:expr, $variant:ident ; default = $fallback:expr) => {
		extract_variant!($unique, $variant).unwrap_or_else(|| $fallback)
	};
	($unique:expr, $variant:ident ; err = $err:expr) => {
		extract_variant!($unique, $variant).ok_or_else(|| Error::new(Span::call_site(), $err))
	};
	($unique:expr, $variant:ident) => {
		$unique.values().find_map(|item| match item {
			SubsystemAttrItem::$variant { value, .. } => Some(value.clone()),
			_ => None,
		})
	};
}

impl Parse for SubsystemAttrArgs {
	fn parse(input: &ParseBuffer) -> Result<Self> {
		let span = input.span();
		let items: Punctuated<SubsystemAttrItem, Token![,]> =
			input.parse_terminated(SubsystemAttrItem::parse)?;

		let mut unique = HashMap::<
			std::mem::Discriminant<SubsystemAttrItem>,
			SubsystemAttrItem,
			RandomState,
		>::default();
		for item in items {
			if let Some(first) = unique.insert(std::mem::discriminant(&item), item.clone()) {
				let mut e = Error::new(
					item.span(),
					format!("Duplicate definition of subsystem generation type found"),
				);
				e.combine(Error::new(first.span(), "previously defined here."));
				return Err(e)
			}
		}
		let error_path = extract_variant!(unique, Error);
		let subsystem_ident = extract_variant!(unique, Subsystem; err = "Must annotate the subsystem type via `subsystem=..` or plainly, as specified in the overseer declaration.")?;
		let trait_prefix_path = extract_variant!(unique, TraitPrefix);
		Ok(SubsystemAttrArgs { span, error_path, subsystem_ident, trait_prefix_path })
	}
}
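
The attribute argument forms this parser admits, sketched as a hypothetical test (`Foo`, `Yikes`, and `somewhere` are placeholder names):

#[test]
fn subsystem_attr_args_accepts_tagged_and_bare() {
	let full: SubsystemAttrArgs = syn::parse_quote! { Foo, error = Yikes, prefix = somewhere };
	assert!(full.error_path.is_some() && full.trait_prefix_path.is_some());

	let minimal: SubsystemAttrArgs = syn::parse_quote! { subsystem = Foo };
	assert!(minimal.error_path.is_none());
}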

@@ -0,0 +1,295 @@
// Copyright 2022 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

use super::*;
use crate::{SubSysAttrItem, SubSystemAttrItems};
use assert_matches::assert_matches;
use quote::quote;
use syn::parse_quote;

mod attr {
	use super::*;

	#[test]
	fn attr_full_works() {
		let attr: OverseerAttrArgs = parse_quote! {
			gen=AllMessage, event=::some::why::ExternEvent, signal=SigSigSig, signal_capacity=111, message_capacity=222,
			error=OverseerError,
		};
		assert_matches!(attr, OverseerAttrArgs {
			message_channel_capacity,
			signal_channel_capacity,
			..
		} => {
			assert_eq!(message_channel_capacity, 222);
			assert_eq!(signal_channel_capacity, 111);
		});
	}

	#[test]
	fn attr_partial_works() {
		let attr: OverseerAttrArgs = parse_quote! {
			gen=AllMessage, event=::some::why::ExternEvent, signal=::foo::SigSigSig,
			error=OverseerError,
		};
		assert_matches!(attr, OverseerAttrArgs {
			message_channel_capacity: _,
			signal_channel_capacity: _,
			..
		} => {
		});
	}
}

mod strukt {

	use super::*;

	#[test]
	fn parse_subsystem_attr_item_works_00_wip() {
		assert_matches!(
		syn::parse2::<SubSysAttrItem>(quote! {
			wip
		}), Ok(SubSysAttrItem::Wip(_)) => {
		});
	}

	#[test]
	fn parse_subsystem_attr_item_works_02_sends() {
		assert_matches!(
		syn::parse2::<SubSysAttrItem>(quote! {
			sends: [A, B, C]
		}), Ok(SubSysAttrItem::Sends(sends)) => {
			assert_eq!(sends.sends.len(), 3);
		});
	}

	#[test]
	fn parse_subsystem_attr_item_works_03_sends() {
		assert_matches!(
		syn::parse2::<SubSysAttrItem>(quote! {
			sends: [A]
		}), Ok(SubSysAttrItem::Sends(sends)) => {
			assert_eq!(sends.sends.len(), 1);
		});
	}

	#[test]
	fn parse_subsystem_attr_item_works_04_sends() {
		assert_matches!(
		syn::parse2::<SubSysAttrItem>(quote! {
			sends: [A,]
		}), Ok(SubSysAttrItem::Sends(sends)) => {
			assert_eq!(sends.sends.len(), 1);
		});
	}

	#[test]
	fn parse_subsystem_attr_item_works_05_sends() {
		assert_matches!(
		syn::parse2::<SubSysAttrItem>(quote! {
			sends: []
		}), Ok(SubSysAttrItem::Sends(sends)) => {
			assert_eq!(sends.sends.len(), 0);
		});
	}

	#[test]
	fn parse_subsystem_attr_item_works_06_consumes() {
		assert_matches!(
		syn::parse2::<SubSysAttrItem>(quote! {
			consumes: Foo
		}), Ok(SubSysAttrItem::Consumes(_consumes)) => {
		});
	}

	#[test]
	fn parse_subsystem_attr_item_works_07_consumes() {
		assert_matches!(
		syn::parse2::<SubSysAttrItem>(quote! {
			Foo
		}), Ok(SubSysAttrItem::Consumes(_consumes)) => {
		});
	}

	#[test]
	fn parse_subsystem_attributes_works_00() {
		syn::parse2::<SubSystemAttrItems>(quote! {
			(wip, blocking, consumes: Foo, sends: [])
		})
		.unwrap();
	}

	#[test]
	fn parse_subsystem_attributes_works_01() {
		assert_matches!(
		syn::parse2::<SubSystemAttrItems>(quote! {
			(blocking, Foo, sends: [])
		}), Ok(_) => {
		});
	}

	#[test]
	fn parse_subsystem_attributes_works_02() {
		assert_matches!(
		syn::parse2::<SubSystemAttrItems>(quote! {
			(consumes: Foo, sends: [Bar])
		}), Ok(_) => {
		});
	}

	#[test]
	fn parse_subsystem_attributes_works_03() {
		assert_matches!(
		syn::parse2::<SubSystemAttrItems>(quote! {
			(blocking, consumes: Foo, sends: [Bar])
		}), Ok(_) => {
		});
	}

	#[test]
	fn parse_subsystem_attributes_works_04() {
		assert_matches!(
		syn::parse2::<SubSystemAttrItems>(quote! {
			(wip, consumes: Foo, sends: [Bar])
		}), Ok(_) => {
		});
	}

	#[test]
	fn parse_subsystem_attributes_works_05() {
		assert_matches!(
		syn::parse2::<SubSystemAttrItems>(quote! {
			(consumes: Foo)
		}), Ok(_) => {
		});
	}

	#[test]
	fn parse_subsystem_attributes_works_06() {
		assert_matches!(
		syn::parse2::<SubSystemAttrItems>(quote! {
			(sends: [Foo], consumes: Bar)
		}), Ok(_) => {
		});
	}

	#[test]
	fn parse_subsystem_attributes_works_07_duplicate_send() {
		assert_matches!(
		syn::parse2::<SubSystemAttrItems>(quote! {
			(sends: [Foo], Bar, Y)
		}), Err(e) => {
			dbg!(e)
		});
	}

	#[test]
	fn parse_subsystem_attributes_works_08() {
		assert_matches!(
		syn::parse2::<SubSystemAttrItems>(quote! {
			(sends: [Foo], consumes: Bar)
		}), Ok(_) => {
		});
	}

	#[test]
	fn parse_subsystem_attributes_works_09_neither_consumes_nor_sends() {
		assert_matches!(
		syn::parse2::<SubSystemAttrItems>(quote! {
			(sends: [])
		}), Err(e) => {
			// must either consume something or send something; having neither is not OK
			dbg!(e)
		});
	}

	#[test]
	fn parse_subsystem_attributes_works_10_empty_with_braces() {
		assert_matches!(
		syn::parse2::<SubSystemAttrItems>(quote! {
			()
		}), Err(e) => {
			dbg!(e)
		});
	}

	#[test]
	fn parse_subsystem_attributes_works_11_empty() {
		assert_matches!(
		syn::parse2::<SubSystemAttrItems>(quote! {

		}), Err(e) => {
			dbg!(e)
		});
	}

	#[test]
	fn parse_subsystem_attributes_works_12_duplicate_consumes_different_fmt() {
		assert_matches!(
		syn::parse2::<SubSystemAttrItems>(quote! {
			(Foo, consumes = Foo)
		}), Err(e) => {
			dbg!(e)
		});
	}

	#[test]
	fn struct_parse_baggage() {
		let item: OverseerGuts = parse_quote! {
			pub struct Ooooh<X = Pffffffft> where X: Secrit {
				#[subsystem(consumes: Foo, sends: [])]
				sub0: FooSubsystem,

				metrics: Metrics,
			}
		};
		let _ = dbg!(item);
	}

	#[test]
	fn struct_parse_full() {
		let item: OverseerGuts = parse_quote! {
			pub struct Ooooh<X = Pffffffft> where X: Secrit {
				#[subsystem(consumes: Foo, sends: [])]
				sub0: FooSubsystem,

				#[subsystem(blocking, consumes: Bar, sends: [])]
				yyy: BaersBuyBilliardBalls,

				#[subsystem(blocking, consumes: Twain, sends: [])]
				fff: Beeeeep,

				#[subsystem(consumes: Rope)]
				mc: MountainCave,

				metrics: Metrics,
			}
		};
		let _ = dbg!(item);
	}

	#[test]
	fn struct_parse_basic() {
		let item: OverseerGuts = parse_quote! {
			pub struct Ooooh {
				#[subsystem(consumes: Foo, sends: [])]
				sub0: FooSubsystem,
			}
		};
		let _ = dbg!(item);
	}
}
@@ -0,0 +1,310 @@
// Copyright 2022 Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

//! Generates the bounds for a particular subsystem `Context` and its associated `type Sender`.
//!
//!
//! ## Implement `trait Subsystem<Context, Error>` via `subsystem`
//!
//! ```ignore
//! # use polkadot_overseer_gen_proc_macro::subsystem;
//! # mod somewhere {
//! # use polkadot_overseer_gen_proc_macro::overlord;
//! # pub use polkadot_overseer_gen::*;
//! #
//! # #[derive(Debug, thiserror::Error)]
//! # #[error("Yikes!")]
//! # pub struct Yikes;
//! # impl From<OverseerError> for Yikes {
//! #	fn from(_: OverseerError) -> Yikes { Yikes }
//! # }
//! # impl From<mpsc::SendError> for Yikes {
//! #	fn from(_: mpsc::SendError) -> Yikes { Yikes }
//! # }
//! #
//! # #[derive(Debug)]
//! # pub struct Eve;
//! #
//! # #[derive(Debug, Clone)]
//! # pub struct Sig;
//! #
//! # #[derive(Debug, Clone, Copy)]
//! # pub struct A;
//! # #[derive(Debug, Clone, Copy)]
//! # pub struct B;
//! #
//! # #[overlord(signal=Sig, gen=AllOfThem, event=Eve, error=Yikes)]
//! # pub struct Wonderland {
//! #	#[subsystem(A, sends: [B])]
//! #	foo: Foo,
//! #	#[subsystem(B, sends: [A])]
//! #	bar: Bar,
//! # }
//! # }
//! # use somewhere::{Yikes, SpawnedSubsystem};
//! #
//! # struct FooSubsystem;
//! #
//! #[subsystem(Foo, error = Yikes, prefix = somewhere)]
//! impl<Context> FooSubsystem {
//!	fn start(self, context: Context) -> SpawnedSubsystem<Yikes> {
//!		// ..
//!		# let _ = context;
//!		# unimplemented!()
//!	}
//! }
//! ```
//!
//! expands to
//!
//! ```ignore
//! # use polkadot_overseer_gen_proc_macro::subsystem;
//! # mod somewhere {
//! # use polkadot_overseer_gen_proc_macro::overlord;
//! # pub use polkadot_overseer_gen::*;
//! #
//! # #[derive(Debug, thiserror::Error)]
//! # #[error("Yikes!")]
//! # pub struct Yikes;
//! # impl From<OverseerError> for Yikes {
//! #	fn from(_: OverseerError) -> Yikes { Yikes }
//! # }
//! # impl From<mpsc::SendError> for Yikes {
//! #	fn from(_: mpsc::SendError) -> Yikes { Yikes }
//! # }
//! #
//! # #[derive(Debug)]
//! # pub struct Eve;
//! #
//! # #[derive(Debug, Clone)]
//! # pub struct Sig;
//! #
//! # #[derive(Debug, Clone, Copy)]
//! # pub struct A;
//! # #[derive(Debug, Clone, Copy)]
//! # pub struct B;
//! #
//! # #[overlord(signal=Sig, gen=AllOfThem, event=Eve, error=Yikes)]
//! # pub struct Wonderland {
//! #	#[subsystem(A, sends: [B])]
//! #	foo: Foo,
//! #	#[subsystem(B, sends: [A])]
//! #	bar: Bar,
//! # }
//! # }
//! # use somewhere::{Yikes, SpawnedSubsystem};
//! # use polkadot_overseer_gen as support_crate;
//! #
//! # struct FooSubsystem;
//! #
//! impl<Context> support_crate::Subsystem<Context, Yikes> for FooSubsystem
//! where
//!	Context: somewhere::FooContextTrait,
//!	Context: support_crate::SubsystemContext,
//!	<Context as somewhere::FooContextTrait>::Sender: somewhere::FooSenderTrait,
//!	<Context as support_crate::SubsystemContext>::Sender: somewhere::FooSenderTrait,
//! {
//!	fn start(self, context: Context) -> SpawnedSubsystem<Yikes> {
//!		// ..
//!		# let _ = context;
//!		# unimplemented!()
//!	}
//! }
//! ```
//!
//! where `support_crate` is either equivalent to `somewhere` or derived from the cargo manifest.
//!
//!
//! ## Add additional trait bounds for a generic `Context` via `contextbounds`
//!
//! ### To an `ImplItem`
//!
//! ```ignore
//! #[contextbounds(Foo, prefix = somewhere)]
//! impl<Context> X {
//!	..
//! }
//! ```
//!
//! expands to
//!
//! ```ignore
//! impl<Context> X
//! where
//!	Context: somewhere::FooSubsystemTrait,
//!	Context: support_crate::SubsystemContext,
//!	<Context as somewhere::FooContextTrait>::Sender: somewhere::FooSenderTrait,
//!	<Context as support_crate::SubsystemContext>::Sender: somewhere::FooSenderTrait,
//! {
//! }
//! ```
//!
//! ### To a free standing `Fn` (not a method, that's covered by the above)
//!
//! ```ignore
//! #[contextbounds(Foo, prefix = somewhere)]
//! fn do_smth<Context>(context: &mut Context) {
//!	..
//! }
//! ```
//!
//! expands to
//!
//! ```ignore
//! fn do_smth<Context>(context: &mut Context)
//! where
//!	Context: somewhere::FooSubsystemTrait,
//!	Context: support_crate::SubsystemContext,
//!	<Context as somewhere::FooContextTrait>::Sender: somewhere::FooSenderTrait,
//!	<Context as support_crate::SubsystemContext>::Sender: somewhere::FooSenderTrait,
//! {
//! }
//! ```
use proc_macro2::TokenStream;
use quote::{format_ident, ToTokens};
use syn::{parse2, parse_quote, punctuated::Punctuated, Result};

use super::{parse::*, *};

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum MakeSubsystem {
	/// Implements `trait Subsystem` and applies the trait bounds to the `Context` generic.
	///
	/// Relevant to `impl Item` only.
	ImplSubsystemTrait,
	/// Only apply the trait bounds to the context.
	AddContextTraitBounds,
}

pub(crate) fn impl_subsystem_context_trait_bounds(
	attr: TokenStream,
	orig: TokenStream,
	make_subsystem: MakeSubsystem,
) -> Result<proc_macro2::TokenStream> {
	let args = parse2::<SubsystemAttrArgs>(attr.clone())?;
	let span = args.span();
	let SubsystemAttrArgs { error_path, subsystem_ident, trait_prefix_path, .. } = args;

	let mut item = parse2::<syn::Item>(orig)?;

	// always prefer the direct usage, if it's not there, let's see if there is
	// a `prefix=*` provided. Either is ok.

	// Technically this is two different things:
	// The place where the `#[overlord]` is annotated is where all `trait *SenderTrait` and
	// `trait *ContextTrait` types exist.
	// The other usage is the true support crate `polkadot-overseer-gen`, where the static ones
	// are declared.
	// Right now, if the `support_crate` is not included, it falls back silently to the `trait_prefix_path`.
	let support_crate = support_crate()
		.or_else(|_e| {
			trait_prefix_path.clone().ok_or_else(|| {
				syn::Error::new(attr.span(), "Couldn't find `polkadot-overseer-gen` in manifest, but also missing a `prefix=` to help trait bound resolution")
			})
		})?;

	let trait_prefix_path = trait_prefix_path.unwrap_or_else(|| parse_quote! { self });
	if trait_prefix_path.segments.trailing_punct() {
		return Err(syn::Error::new(trait_prefix_path.span(), "Must not end with `::`"))
	}

	let subsystem_ctx_trait = format_ident!("{}ContextTrait", subsystem_ident);
	let subsystem_sender_trait = format_ident!("{}SenderTrait", subsystem_ident);

	let extra_where_predicates: Punctuated<syn::WherePredicate, syn::Token![,]> = parse_quote! {
		Context: #trait_prefix_path::#subsystem_ctx_trait,
		Context: #support_crate::SubsystemContext,
		<Context as #trait_prefix_path::#subsystem_ctx_trait>::Sender: #trait_prefix_path::#subsystem_sender_trait,
		<Context as #support_crate::SubsystemContext>::Sender: #trait_prefix_path::#subsystem_sender_trait,
	};

	let apply_ctx_bound_if_present = move |generics: &mut syn::Generics| -> bool {
		if generics
			.params
			.iter()
			.find(|generic| match generic {
				syn::GenericParam::Type(ty) if ty.ident == "Context" => true,
				_ => false,
			})
			.is_some()
		{
			let where_clause = generics.make_where_clause();
			where_clause.predicates.extend(extra_where_predicates.clone());
			true
		} else {
			false
		}
	};
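
A standalone sketch of the same where-clause injection technique, under the assumption of syn 1.x:

fn add_context_bound(generics: &mut syn::Generics) {
	let has_context = generics.params.iter().any(|p| match p {
		syn::GenericParam::Type(ty) => ty.ident == "Context",
		_ => false,
	});
	if has_context {
		// `make_where_clause` creates an empty `where` clause on demand,
		// so this is safe whether or not one already exists.
		generics
			.make_where_clause()
			.predicates
			.push(syn::parse_quote! { Context: Send + 'static });
	}
}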

	match item {
		syn::Item::Impl(ref mut struktured_impl) => {
			if make_subsystem == MakeSubsystem::ImplSubsystemTrait {
				let error_path = error_path.ok_or_else(|| {
					syn::Error::new(
						span,
						"Must annotate the identical overseer error type via `error=..`.",
					)
				})?;
				// Only replace the subsystem trait if it's desired.
				struktured_impl.trait_.replace((
					None,
					parse_quote! {
						#support_crate::Subsystem<Context, #error_path>
					},
					syn::token::For::default(),
				));
			}

			apply_ctx_bound_if_present(&mut struktured_impl.generics);
			for item in struktured_impl.items.iter_mut() {
				match item {
					syn::ImplItem::Method(method) => {
						apply_ctx_bound_if_present(&mut method.sig.generics);
					},
					_others => {
						// don't error, just nop
					},
				}
			}
		},
		syn::Item::Fn(ref mut struktured_fn) => {
			if make_subsystem == MakeSubsystem::ImplSubsystemTrait {
				return Err(syn::Error::new(struktured_fn.span(), "Cannot make a free function a subsystem, did you mean to apply `contextbounds` instead?"))
			}
			apply_ctx_bound_if_present(&mut struktured_fn.sig.generics);
		},
		other =>
			return Err(syn::Error::new(
				other.span(),
				"Macro can only be annotated on functions or struct implementations",
			)),
	};

	Ok(item.to_token_stream())
}

#[cfg(test)]
mod tests {
	use super::*;

	#[test]
	fn is_path() {
		let _p: Path = parse_quote! { self };
		let _p: Path = parse_quote! { crate };
		let _p: Path = parse_quote! { ::foo };
		let _p: Path = parse_quote! { bar };
	}
}
@@ -32,13 +32,13 @@ fn print() {

	let item = quote! {
		pub struct Ooooh<X = Pffffffft> where X: Secrit {
			#[subsystem(no_dispatch, Foo)]
			#[subsystem(Foo)]
			sub0: FooSubsystem,

			#[subsystem(blocking, Bar)]
			yyy: BaersBuyBilliardBalls,

			#[subsystem(no_dispatch, blocking, Twain)]
			#[subsystem(blocking, Twain)]
			fff: Beeeeep,

			#[subsystem(Rope)]
@@ -57,13 +57,13 @@ fn print() {
fn struct_parse_full() {
	let item: OverseerGuts = parse_quote! {
		pub struct Ooooh<X = Pffffffft> where X: Secrit {
			#[subsystem(no_dispatch, Foo)]
			#[subsystem(Foo)]
			sub0: FooSubsystem,

			#[subsystem(blocking, Bar)]
			yyy: BaersBuyBilliardBalls,

			#[subsystem(no_dispatch, blocking, Twain)]
			#[subsystem(blocking, Twain)]
			fff: Beeeeep,

			#[subsystem(Rope)]
@@ -88,11 +88,11 @@ fn struct_parse_basic() {

#[test]
fn attr_full() {
	let attr: AttrArgs = parse_quote! {
	let attr: OverseerAttrArgs = parse_quote! {
		gen=AllMessage, event=::some::why::ExternEvent, signal=SigSigSig, signal_capacity=111, message_capacity=222,
		error=OverseerError,
	};
	assert_matches!(attr, AttrArgs {
	assert_matches!(attr, OverseerAttrArgs {
		message_channel_capacity,
		signal_channel_capacity,
		..
@@ -104,11 +104,11 @@ fn attr_full() {

#[test]
fn attr_partial() {
	let attr: AttrArgs = parse_quote! {
	let attr: OverseerAttrArgs = parse_quote! {
		gen=AllMessage, event=::some::why::ExternEvent, signal=::foo::SigSigSig,
		error=OverseerError,
	};
	assert_matches!(attr, AttrArgs {
	assert_matches!(attr, OverseerAttrArgs {
		message_channel_capacity: _,
		signal_channel_capacity: _,
		..

@@ -60,12 +60,13 @@
#![deny(missing_docs)]
#![deny(unused_crate_dependencies)]

pub use polkadot_overseer_gen_proc_macro::overlord;
pub use polkadot_overseer_gen_proc_macro::{contextbounds, overlord, subsystem};

#[doc(hidden)]
pub use gum;
#[doc(hidden)]
pub use metered;

#[doc(hidden)]
pub use polkadot_node_primitives::SpawnNamed;

@@ -101,7 +102,7 @@ use std::fmt;
#[cfg(test)]
mod tests;

/// A type of messages that are sent from [`Subsystem`] to [`Overseer`].
/// A type of messages that are sent from a [`Subsystem`] to the declared overseer.
///
/// Used to launch jobs.
pub enum ToOverseer {
@@ -312,7 +313,7 @@ pub struct SubsystemMeterReadouts {
///
/// [`Subsystem`]: trait.Subsystem.html
///
/// `M` here is the inner message type, and _not_ the generated `enum AllMessages`.
/// `M` here is the inner message type, and _not_ the generated `enum AllMessages` or `#message_wrapper` type.
pub struct SubsystemInstance<Message, Signal> {
	/// Send sink for `Signal`s to be sent to a subsystem.
	pub tx_signal: crate::metered::MeteredSender<Signal>,
@@ -362,20 +363,23 @@ pub trait SubsystemContext: Send + 'static {
	/// The message type of this context. Subsystems launched with this context will expect
	/// to receive messages of this type. Commonly this is the wrapping `enum` called
	/// `AllMessages`.
||||
type Message: std::fmt::Debug + Send + 'static;
|
||||
type Message: ::std::fmt::Debug + Send + 'static;
|
||||
/// And the same for signals.
|
||||
type Signal: std::fmt::Debug + Send + 'static;
|
||||
/// The overarching all messages `enum`.
|
||||
/// In some cases can be identical to `Self::Message`.
|
||||
type AllMessages: From<Self::Message> + Send + 'static;
|
||||
type Signal: ::std::fmt::Debug + Send + 'static;
|
||||
/// The overarching messages `enum` for this particular subsystem.
|
||||
type OutgoingMessages: ::std::fmt::Debug + Send + 'static;
|
||||
|
||||
// The overarching messages `enum` for this particular subsystem.
|
||||
// type AllMessages: From<Self::OutgoingMessages> + From<Self::Message> + std::fmt::Debug + Send + 'static;
|
||||
|
||||
/// The sender type as provided by `sender()` and underlying.
|
||||
type Sender: SubsystemSender<Self::AllMessages> + Send + 'static;
|
||||
type Sender: Clone + Send + 'static + SubsystemSender<Self::OutgoingMessages>;
|
||||
/// The error type.
|
||||
type Error: ::std::error::Error + ::std::convert::From<OverseerError> + Sync + Send + 'static;
|
||||
|
||||
/// Try to asynchronously receive a message.
|
||||
///
|
||||
/// This has to be used with caution, if you loop over this without
|
||||
/// Has to be used with caution, if you loop over this without
|
||||
/// using `pending!()` macro you will end up with a busy loop!
|
||||
async fn try_recv(&mut self) -> Result<Option<FromOverseer<Self::Message, Self::Signal>>, ()>;
|
||||
|
||||
@@ -397,34 +401,37 @@ pub trait SubsystemContext: Send + 'static {
|
||||
) -> Result<(), Self::Error>;
|
||||
|
||||
/// Send a direct message to some other `Subsystem`, routed based on message type.
|
||||
async fn send_message<X>(&mut self, msg: X)
|
||||
// #[deprecated(note = "Use `self.sender().send_message(msg) instead, avoid passing around the full context.")]
|
||||
async fn send_message<T>(&mut self, msg: T)
|
||||
where
|
||||
Self::AllMessages: From<X>,
|
||||
X: Send,
|
||||
Self::OutgoingMessages: From<T> + Send,
|
||||
T: Send,
|
||||
{
|
||||
self.sender().send_message(<Self::AllMessages>::from(msg)).await
|
||||
self.sender().send_message(<Self::OutgoingMessages>::from(msg)).await
|
||||
}
|
||||
|
||||
/// Send multiple direct messages to other `Subsystem`s, routed based on message type.
|
||||
async fn send_messages<X, T>(&mut self, msgs: T)
|
||||
// #[deprecated(note = "Use `self.sender().send_message(msg) instead, avoid passing around the full context.")]
|
||||
async fn send_messages<T, I>(&mut self, msgs: I)
|
||||
where
|
||||
T: IntoIterator<Item = X> + Send,
|
||||
T::IntoIter: Send,
|
||||
Self::AllMessages: From<X>,
|
||||
X: Send,
|
||||
Self::OutgoingMessages: From<T> + Send,
|
||||
I: IntoIterator<Item = T> + Send,
|
||||
I::IntoIter: Send,
|
||||
T: Send,
|
||||
{
|
||||
self.sender()
|
||||
.send_messages(msgs.into_iter().map(|x| <Self::AllMessages>::from(x)))
|
||||
.send_messages(msgs.into_iter().map(<Self::OutgoingMessages>::from))
|
||||
.await
|
||||
}
|
||||
|
||||
/// Send a message using the unbounded connection.
|
||||
// #[deprecated(note = "Use `self.sender().send_unbounded_message(msg) instead, avoid passing around the full context.")]
|
||||
fn send_unbounded_message<X>(&mut self, msg: X)
|
||||
where
|
||||
Self::AllMessages: From<X>,
|
||||
Self::OutgoingMessages: From<X> + Send,
|
||||
X: Send,
|
||||
{
|
||||
self.sender().send_unbounded_message(Self::AllMessages::from(msg))
|
||||
self.sender().send_unbounded_message(<Self::OutgoingMessages>::from(msg))
|
||||
}
|
||||
|
||||
/// Obtain the sender.
|
||||
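The hunk above is the heart of the change: a context's sender is now bounded by the per-subsystem `OutgoingMessages` enum instead of the global `AllMessages`, so an undeclared send becomes a type error rather than a runtime routing concern. A minimal sketch of what this looks like from a subsystem author's side; every type name here (`PingMessage`, `FooOutgoingMessages`) is invented for illustration, and `FooOutgoingMessages` stands in for the enum the proc-macro would generate from a `sends: [...]` list:

```rust
// Hypothetical types standing in for proc-macro output.
#[derive(Debug)]
pub struct PingMessage;

#[derive(Debug)]
pub enum FooOutgoingMessages {
	Ping(PingMessage),
}

impl From<PingMessage> for FooOutgoingMessages {
	fn from(msg: PingMessage) -> Self {
		FooOutgoingMessages::Ping(msg)
	}
}

async fn do_work<Ctx>(ctx: &mut Ctx)
where
	Ctx: SubsystemContext<OutgoingMessages = FooOutgoingMessages>,
{
	// Compiles only because `PingMessage` converts into the declared
	// `OutgoingMessages` enum; an undeclared message type has no `From`
	// impl and is rejected at compile time.
	ctx.send_message(PingMessage).await;
}
```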
@@ -450,22 +457,25 @@ where

/// Sender end of a channel to interface with a subsystem.
#[async_trait::async_trait]
pub trait SubsystemSender<Message>: Send + Clone + 'static {
pub trait SubsystemSender<OutgoingMessage>: Clone + Send + 'static
where
	OutgoingMessage: Send,
{
	/// Send a direct message to some other `Subsystem`, routed based on message type.
	async fn send_message(&mut self, msg: Message);
	async fn send_message(&mut self, msg: OutgoingMessage);

	/// Send multiple direct messages to other `Subsystem`s, routed based on message type.
	async fn send_messages<T>(&mut self, msgs: T)
	async fn send_messages<I>(&mut self, msgs: I)
	where
		T: IntoIterator<Item = Message> + Send,
		T::IntoIter: Send;
		I: IntoIterator<Item = OutgoingMessage> + Send,
		I::IntoIter: Send;

	/// Send a message onto the unbounded queue of some other `Subsystem`, routed based on message
	/// type.
	///
	/// This function should be used only when there is some other bounding factor on the messages
	/// sent with it. Otherwise, it risks a memory leak.
	fn send_unbounded_message(&mut self, msg: Message);
	fn send_unbounded_message(&mut self, msg: OutgoingMessage);
}

/// A future that wraps another future with a `Delay` allowing for time-limited futures.

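For reference, a sketch of what satisfying the narrowed trait takes: one impl per outgoing-message type, or (as the test helpers later in this diff do) a single blanket impl. This mock is hypothetical and simply funnels everything into an unbounded channel:

```rust
use futures::{channel::mpsc, SinkExt};

/// Hypothetical mock sender backed by an unbounded channel.
struct MockSender<M> {
	tx: mpsc::UnboundedSender<M>,
}

// Manual impl so `M` itself is not required to be `Clone`.
impl<M> Clone for MockSender<M> {
	fn clone(&self) -> Self {
		Self { tx: self.tx.clone() }
	}
}

#[async_trait::async_trait]
impl<M: Send + 'static> SubsystemSender<M> for MockSender<M> {
	async fn send_message(&mut self, msg: M) {
		let _ = self.tx.send(msg).await;
	}

	async fn send_messages<I>(&mut self, msgs: I)
	where
		I: IntoIterator<Item = M> + Send,
		I::IntoIter: Send,
	{
		for msg in msgs {
			self.send_message(msg).await;
		}
	}

	fn send_unbounded_message(&mut self, msg: M) {
		let _ = self.tx.unbounded_send(msg);
	}
}
```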
@@ -15,7 +15,7 @@ struct MsgStrukt(u8);

#[overlord(signal=SigSigSig, event=Event, gen=AllMessages)]
struct Overseer {
	#[subsystem(no_dispatch, MsgStrukt)]
	#[subsystem(MsgStrukt)]
	sub0: AwesomeSubSys,

	i_like_pie: f64,

@@ -21,7 +21,7 @@ pub struct MsgStrukt(u8);

#[overlord(signal=SigSigSig, error=OverseerError, event=Event, gen=AllMessages)]
struct Overseer {
	#[subsystem(no_dispatch, MsgStrukt)]
	#[subsystem(MsgStrukt)]
	sub0: AwesomeSubSys,
	i_like_pie: f64,
}
@@ -58,4 +58,4 @@ fn main() {
		.spawner(DummySpawner)
		.build()
		.unwrap();
	}
}

@@ -21,7 +21,7 @@ pub struct MsgStrukt(u8);

#[overlord(signal=SigSigSig, error=OverseerError, event=Event, gen=AllMessages)]
struct Overseer {
	#[subsystem(no_dispatch, MsgStrukt)]
	#[subsystem(MsgStrukt)]
	sub0: AwesomeSubSys,
	i_like_pie: f64,
}

@@ -21,7 +21,7 @@ pub struct MsgStrukt(u8);

#[overlord(signal=SigSigSig, error=OverseerError, event=Event, gen=AllMessages)]
struct Overseer {
	#[subsystem(no_dispatch, MsgStrukt)]
	#[subsystem(MsgStrukt)]
	sub0: AwesomeSubSys,
	i_like_pie: f64,
}

@@ -21,7 +21,7 @@ pub struct MsgStrukt(u8);

#[overlord(signal=SigSigSig, error=OverseerError, event=Event, gen=AllMessages)]
struct Overseer {
	#[subsystem(no_dispatch, MsgStrukt)]
	#[subsystem(MsgStrukt)]
	sub0: AwesomeSubSys,
	i_like_pie: f64,
}

@@ -21,7 +21,7 @@ pub struct MsgStrukt(u8);

#[overlord(signal=SigSigSig, error=OverseerError, event=Event, gen=AllMessages)]
struct Overseer<T> {
	#[subsystem(no_dispatch, MsgStrukt)]
	#[subsystem(MsgStrukt)]
	sub0: AwesomeSubSys,
	i_like_pie: T,
}

@@ -15,8 +15,8 @@
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

use crate::{
	prometheus::Registry, AllMessages, HeadSupportsParachains, InitializedOverseerBuilder,
	MetricsTrait, Overseer, OverseerMetrics, OverseerSignal, OverseerSubsystemContext, SpawnNamed,
	prometheus::Registry, HeadSupportsParachains, InitializedOverseerBuilder, MetricsTrait,
	Overseer, OverseerMetrics, OverseerSignal, OverseerSubsystemContext, SpawnNamed,
	KNOWN_LEAVES_CACHE_SIZE,
};
use lru::LruCache;
@@ -30,11 +30,7 @@ pub struct DummySubsystem;

impl<Context> Subsystem<Context, SubsystemError> for DummySubsystem
where
	Context: SubsystemContext<
		Signal = OverseerSignal,
		Error = SubsystemError,
		AllMessages = AllMessages,
	>,
	Context: SubsystemContext<Signal = OverseerSignal, Error = SubsystemError>,
{
	fn start(self, mut ctx: Context) -> SpawnedSubsystem<SubsystemError> {
		let future = Box::pin(async move {

@@ -77,15 +77,14 @@ use polkadot_primitives::{
};
use sp_api::{ApiExt, ProvideRuntimeApi};

use polkadot_node_network_protocol::VersionedValidationProtocol;
use polkadot_node_subsystem_types::messages::{
	ApprovalDistributionMessage, ApprovalVotingMessage, AvailabilityDistributionMessage,
	AvailabilityRecoveryMessage, AvailabilityStoreMessage, BitfieldDistributionMessage,
	BitfieldSigningMessage, CandidateBackingMessage, CandidateValidationMessage, ChainApiMessage,
	ChainSelectionMessage, CollationGenerationMessage, CollatorProtocolMessage,
	DisputeCoordinatorMessage, DisputeDistributionMessage, GossipSupportMessage,
	NetworkBridgeEvent, NetworkBridgeMessage, ProvisionerMessage, PvfCheckerMessage,
	RuntimeApiMessage, StatementDistributionMessage,
	NetworkBridgeMessage, ProvisionerMessage, PvfCheckerMessage, RuntimeApiMessage,
	StatementDistributionMessage,
};
pub use polkadot_node_subsystem_types::{
	errors::{SubsystemError, SubsystemResult},
@@ -108,9 +107,9 @@ use parity_util_mem::MemoryAllocationTracker;

pub use polkadot_overseer_gen as gen;
pub use polkadot_overseer_gen::{
	overlord, FromOverseer, MapSubsystem, MessagePacket, SignalsReceived, SpawnNamed, Subsystem,
	SubsystemContext, SubsystemIncomingMessages, SubsystemInstance, SubsystemMeterReadouts,
	SubsystemMeters, SubsystemSender, TimeoutExt, ToOverseer,
	contextbounds, overlord, subsystem, FromOverseer, MapSubsystem, MessagePacket, SignalsReceived,
	SpawnNamed, Subsystem, SubsystemContext, SubsystemIncomingMessages, SubsystemInstance,
	SubsystemMeterReadouts, SubsystemMeters, SubsystemSender, TimeoutExt, ToOverseer,
};

/// Store 2 days worth of blocks, not accounting for forks,
@@ -414,71 +413,155 @@ pub async fn forward_events<P: BlockchainEvents<Block>>(client: Arc<P>, mut hand
	event=Event,
	signal=OverseerSignal,
	error=SubsystemError,
	network=NetworkBridgeEvent<VersionedValidationProtocol>,
	message_capacity=2048,
)]
pub struct Overseer<SupportsParachains> {
	#[subsystem(no_dispatch, CandidateValidationMessage)]
	#[subsystem(CandidateValidationMessage, sends: [
		RuntimeApiMessage,
	])]
	candidate_validation: CandidateValidation,

	#[subsystem(no_dispatch, PvfCheckerMessage)]
	#[subsystem(PvfCheckerMessage, sends: [
		CandidateValidationMessage,
		RuntimeApiMessage,
	])]
	pvf_checker: PvfChecker,

	#[subsystem(no_dispatch, CandidateBackingMessage)]
	#[subsystem(CandidateBackingMessage, sends: [
		CandidateValidationMessage,
		CollatorProtocolMessage,
		AvailabilityDistributionMessage,
		AvailabilityStoreMessage,
		StatementDistributionMessage,
		ProvisionerMessage,
		RuntimeApiMessage,
		DisputeCoordinatorMessage,
	])]
	candidate_backing: CandidateBacking,

	#[subsystem(StatementDistributionMessage)]
	#[subsystem(StatementDistributionMessage, sends: [
		NetworkBridgeMessage,
		CandidateBackingMessage,
		RuntimeApiMessage,
	])]
	statement_distribution: StatementDistribution,

	#[subsystem(no_dispatch, AvailabilityDistributionMessage)]
	#[subsystem(AvailabilityDistributionMessage, sends: [
		AvailabilityStoreMessage,
		AvailabilityRecoveryMessage,
		ChainApiMessage,
		RuntimeApiMessage,
		NetworkBridgeMessage,
	])]
	availability_distribution: AvailabilityDistribution,

	#[subsystem(no_dispatch, AvailabilityRecoveryMessage)]
	#[subsystem(AvailabilityRecoveryMessage, sends: [
		NetworkBridgeMessage,
		RuntimeApiMessage,
		AvailabilityStoreMessage,
	])]
	availability_recovery: AvailabilityRecovery,

	#[subsystem(blocking, no_dispatch, BitfieldSigningMessage)]
	#[subsystem(blocking, BitfieldSigningMessage, sends: [
		AvailabilityStoreMessage,
		RuntimeApiMessage,
		BitfieldDistributionMessage,
	])]
	bitfield_signing: BitfieldSigning,

	#[subsystem(BitfieldDistributionMessage)]
	#[subsystem(BitfieldDistributionMessage, sends: [
		RuntimeApiMessage,
		NetworkBridgeMessage,
		ProvisionerMessage,
	])]
	bitfield_distribution: BitfieldDistribution,

	#[subsystem(no_dispatch, ProvisionerMessage)]
	#[subsystem(ProvisionerMessage, sends: [
		RuntimeApiMessage,
		CandidateBackingMessage,
		ChainApiMessage,
		DisputeCoordinatorMessage,
	])]
	provisioner: Provisioner,

	#[subsystem(no_dispatch, blocking, RuntimeApiMessage)]
	#[subsystem(blocking, RuntimeApiMessage, sends: [])]
	runtime_api: RuntimeApi,

	#[subsystem(no_dispatch, blocking, AvailabilityStoreMessage)]
	#[subsystem(blocking, AvailabilityStoreMessage, sends: [
		ChainApiMessage,
		RuntimeApiMessage,
	])]
	availability_store: AvailabilityStore,

	#[subsystem(no_dispatch, NetworkBridgeMessage)]
	#[subsystem(NetworkBridgeMessage, sends: [
		BitfieldDistributionMessage,
		StatementDistributionMessage,
		ApprovalDistributionMessage,
		GossipSupportMessage,
		DisputeDistributionMessage,
		CollationGenerationMessage,
		CollatorProtocolMessage,
	])]
	network_bridge: NetworkBridge,

	#[subsystem(no_dispatch, blocking, ChainApiMessage)]
	#[subsystem(blocking, ChainApiMessage, sends: [])]
	chain_api: ChainApi,

	#[subsystem(no_dispatch, CollationGenerationMessage)]
	#[subsystem(CollationGenerationMessage, sends: [
		RuntimeApiMessage,
		CollatorProtocolMessage,
	])]
	collation_generation: CollationGeneration,

	#[subsystem(no_dispatch, CollatorProtocolMessage)]
	#[subsystem(CollatorProtocolMessage, sends: [
		NetworkBridgeMessage,
		RuntimeApiMessage,
		CandidateBackingMessage,
	])]
	collator_protocol: CollatorProtocol,

	#[subsystem(ApprovalDistributionMessage)]
	#[subsystem(ApprovalDistributionMessage, sends: [
		NetworkBridgeMessage,
		ApprovalVotingMessage,
	])]
	approval_distribution: ApprovalDistribution,

	#[subsystem(no_dispatch, blocking, ApprovalVotingMessage)]
	#[subsystem(blocking, ApprovalVotingMessage, sends: [
		RuntimeApiMessage,
		ChainApiMessage,
		ChainSelectionMessage,
		DisputeCoordinatorMessage,
		AvailabilityRecoveryMessage,
		ApprovalDistributionMessage,
		CandidateValidationMessage,
	])]
	approval_voting: ApprovalVoting,

	#[subsystem(GossipSupportMessage)]
	#[subsystem(GossipSupportMessage, sends: [
		NetworkBridgeMessage,
		RuntimeApiMessage,
		ChainSelectionMessage,
	])]
	gossip_support: GossipSupport,

	#[subsystem(no_dispatch, blocking, DisputeCoordinatorMessage)]
	#[subsystem(blocking, DisputeCoordinatorMessage, sends: [
		RuntimeApiMessage,
		ChainApiMessage,
		DisputeDistributionMessage,
		CandidateValidationMessage,
		AvailabilityStoreMessage,
		AvailabilityRecoveryMessage,
	])]
	dispute_coordinator: DisputeCoordinator,

	#[subsystem(no_dispatch, DisputeDistributionMessage)]
	#[subsystem(DisputeDistributionMessage, sends: [
		RuntimeApiMessage,
		DisputeCoordinatorMessage,
		NetworkBridgeMessage,
	])]
	dispute_distribution: DisputeDistribution,

	#[subsystem(no_dispatch, blocking, ChainSelectionMessage)]
	#[subsystem(blocking, ChainSelectionMessage, sends: [ChainApiMessage])]
	chain_selection: ChainSelection,

	/// External listeners waiting for a hash to be in the active-leave set.
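In short: every `no_dispatch` flag disappears, and each subsystem now carries an explicit `sends: [...]` allow-list from which the proc-macro derives the per-subsystem outgoing enum (and the graphviz edges this PR can emit). Reduced to a toy, the new declaration shape is, with all subsystem and message names invented:

```rust
// Hypothetical two-subsystem overseer in the new syntax.
#[overlord(signal=OverseerSignal, event=Event, gen=AllMessages, error=SubsystemError)]
pub struct MiniOverseer {
	// `a` consumes MsgA and may only send MsgB.
	#[subsystem(MsgA, sends: [MsgB])]
	a: SubsystemA,

	// `b` runs on the blocking pool and sends nothing.
	#[subsystem(blocking, MsgB, sends: [])]
	b: SubsystemB,
}
```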
@@ -60,11 +60,7 @@ struct TestSubsystem1(metered::MeteredSender<usize>);

impl<C> overseer::Subsystem<C, SubsystemError> for TestSubsystem1
where
	C: overseer::SubsystemContext<
		Message = CandidateValidationMessage,
		Signal = OverseerSignal,
		AllMessages = AllMessages,
	>,
	C: overseer::SubsystemContext<Message = CandidateValidationMessage, Signal = OverseerSignal>,
{
	fn start(self, mut ctx: C) -> SpawnedSubsystem {
		let mut sender = self.0;
@@ -95,8 +91,8 @@ impl<C> overseer::Subsystem<C, SubsystemError> for TestSubsystem2
where
	C: overseer::SubsystemContext<
		Message = CandidateBackingMessage,
		OutgoingMessages = <CandidateBackingMessage as AssociateOutgoing>::OutgoingMessages,
		Signal = OverseerSignal,
		AllMessages = AllMessages,
	>,
{
	fn start(self, mut ctx: C) -> SpawnedSubsystem {
@@ -143,11 +139,7 @@ struct ReturnOnStart;

impl<C> overseer::Subsystem<C, SubsystemError> for ReturnOnStart
where
	C: overseer::SubsystemContext<
		Message = CandidateBackingMessage,
		Signal = OverseerSignal,
		AllMessages = AllMessages,
	>,
	C: overseer::SubsystemContext<Message = CandidateBackingMessage, Signal = OverseerSignal>,
{
	fn start(self, mut _ctx: C) -> SpawnedSubsystem {
		SpawnedSubsystem {
@@ -316,11 +308,7 @@ struct TestSubsystem5(metered::MeteredSender<OverseerSignal>);

impl<C> overseer::Subsystem<C, SubsystemError> for TestSubsystem5
where
	C: overseer::SubsystemContext<
		Message = CandidateValidationMessage,
		Signal = OverseerSignal,
		AllMessages = AllMessages,
	>,
	C: overseer::SubsystemContext<Message = CandidateValidationMessage, Signal = OverseerSignal>,
{
	fn start(self, mut ctx: C) -> SpawnedSubsystem {
		let mut sender = self.0.clone();
@@ -352,11 +340,7 @@ struct TestSubsystem6(metered::MeteredSender<OverseerSignal>);

impl<C> Subsystem<C, SubsystemError> for TestSubsystem6
where
	C: overseer::SubsystemContext<
		Message = CandidateBackingMessage,
		Signal = OverseerSignal,
		AllMessages = AllMessages,
	>,
	C: overseer::SubsystemContext<Message = CandidateBackingMessage, Signal = OverseerSignal>,
{
	fn start(self, mut ctx: C) -> SpawnedSubsystem {
		let mut sender = self.0.clone();
@@ -761,7 +745,7 @@ impl CounterSubsystem {

impl<C, M> Subsystem<C, SubsystemError> for CounterSubsystem
where
	C: overseer::SubsystemContext<Message = M, Signal = OverseerSignal, AllMessages = AllMessages>,
	C: overseer::SubsystemContext<Message = M, Signal = OverseerSignal>,
	M: Send,
{
	fn start(self, mut ctx: C) -> SpawnedSubsystem {

@@ -86,7 +86,7 @@ pub const MAX_FINALITY_LAG: u32 = 500;
/// We are not using `NonZeroU32` here because `expect` and `unwrap` are not yet const, so global
/// constants of `SessionWindowSize` would require `lazy_static` in that case.
///
/// See: https://github.com/rust-lang/rust/issues/67441
/// See: <https://github.com/rust-lang/rust/issues/67441>
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd)]
pub struct SessionWindowSize(SessionIndex);

@@ -83,6 +83,7 @@ polkadot-node-primitives = { path = "../primitives" }
polkadot-rpc = { path = "../../rpc" }
polkadot-node-subsystem = { path = "../subsystem" }
polkadot-node-subsystem-util = { path = "../subsystem-util" }
polkadot-node-subsystem-types = { path = "../subsystem-types" }
polkadot-runtime-parachains = { path = "../../runtime/parachains" }
polkadot-node-network-protocol = { path = "../network/protocol" }

@@ -24,14 +24,16 @@ use polkadot_node_core_chain_selection::Config as ChainSelectionConfig;
use polkadot_node_core_dispute_coordinator::Config as DisputeCoordinatorConfig;
use polkadot_node_core_provisioner::ProvisionerConfig;
use polkadot_node_network_protocol::request_response::{v1 as request_v1, IncomingRequestReceiver};
use polkadot_node_subsystem_types::messages::{BitfieldSigningMessage, ProvisionerMessage};
#[cfg(any(feature = "malus", test))]
pub use polkadot_overseer::{
	dummy::{dummy_overseer_builder, DummySubsystem},
	HeadSupportsParachains,
};
use polkadot_overseer::{
	metrics::Metrics as OverseerMetrics, BlockInfo, InitializedOverseerBuilder, MetricsTrait,
	Overseer, OverseerConnector, OverseerHandle,
	gen::SubsystemContext, metrics::Metrics as OverseerMetrics, BlockInfo,
	InitializedOverseerBuilder, MetricsTrait, Overseer, OverseerConnector, OverseerHandle,
	OverseerSubsystemContext,
};

use polkadot_primitives::runtime_api::ParachainHost;
@@ -152,9 +154,15 @@ pub fn prepared_overseer_builder<'a, Spawner, RuntimeClient>(
	StatementDistributionSubsystem<rand::rngs::StdRng>,
	AvailabilityDistributionSubsystem,
	AvailabilityRecoverySubsystem,
	BitfieldSigningSubsystem<Spawner>,
	BitfieldSigningSubsystem<
		Spawner,
		<OverseerSubsystemContext<BitfieldSigningMessage> as SubsystemContext>::Sender,
	>,
	BitfieldDistributionSubsystem,
	ProvisionerSubsystem<Spawner>,
	ProvisionerSubsystem<
		Spawner,
		<OverseerSubsystemContext<ProvisionerMessage> as SubsystemContext>::Sender,
	>,
	RuntimeApiSubsystem<RuntimeClient>,
	AvailabilityStoreSubsystem,
	NetworkBridgeSubsystem<

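Since subsystems are now generic over their concrete sender, wiring code has to name the generated sender type somewhere. The projection used twice in the hunk above, pulled out for clarity (the type aliases themselves are illustrative only):

```rust
// The context generated for a given consumed message type...
type BitfieldSigningContext = OverseerSubsystemContext<BitfieldSigningMessage>;
// ...projects out the concrete sender the subsystem is instantiated with.
type BitfieldSigningSender = <BitfieldSigningContext as SubsystemContext>::Sender;
```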
@@ -20,7 +20,7 @@

use polkadot_node_subsystem::{
	messages::AllMessages, overseer, FromOverseer, OverseerSignal, SpawnedSubsystem,
	SubsystemContext, SubsystemError, SubsystemResult,
	SubsystemError, SubsystemResult,
};
use polkadot_node_subsystem_util::TimeoutExt;

@@ -150,24 +150,25 @@ pub fn sender_receiver() -> (TestSubsystemSender, mpsc::UnboundedReceiver<AllMes
}

#[async_trait::async_trait]
impl<T> overseer::SubsystemSender<T> for TestSubsystemSender
impl<OutgoingMessage> overseer::SubsystemSender<OutgoingMessage> for TestSubsystemSender
where
	T: Into<AllMessages> + Send + 'static,
	AllMessages: From<OutgoingMessage>,
	OutgoingMessage: Send + 'static,
{
	async fn send_message(&mut self, msg: T) {
	async fn send_message(&mut self, msg: OutgoingMessage) {
		self.tx.send(msg.into()).await.expect("test overseer no longer live");
	}

	async fn send_messages<X>(&mut self, msgs: X)
	async fn send_messages<I>(&mut self, msgs: I)
	where
		X: IntoIterator<Item = T> + Send,
		X::IntoIter: Send,
		I: IntoIterator<Item = OutgoingMessage> + Send,
		I::IntoIter: Send,
	{
		let mut iter = stream::iter(msgs.into_iter().map(|msg| Ok(msg.into())));
		self.tx.send_all(&mut iter).await.expect("test overseer no longer live");
	}

	fn send_unbounded_message(&mut self, msg: T) {
	fn send_unbounded_message(&mut self, msg: OutgoingMessage) {
		self.tx.unbounded_send(msg.into()).expect("test overseer no longer live");
	}
}
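With that blanket impl, a test can hand the same `TestSubsystemSender` to code expecting any declared message type and still observe everything on one `AllMessages` stream. Roughly, inside an async test (the concrete message variant is only an example; `futures::StreamExt` is assumed in scope for `next()`):

```rust
// Sketch: `sender_receiver()` is the helper named in the hunk above.
let (mut sender, mut rx) = sender_receiver();
// Send a typed message; the blanket impl lifts it into `AllMessages`.
sender.send_message(CollatorProtocolMessage::CollateOn(42.into())).await;
// Observe it on the single wrapped stream.
assert_matches!(rx.next().await, Some(AllMessages::CollatorProtocol(_)));
```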
@@ -180,16 +181,17 @@ pub struct TestSubsystemContext<M, S> {
}

#[async_trait::async_trait]
impl<M, S> overseer::SubsystemContext for TestSubsystemContext<M, S>
impl<M, Spawner> overseer::SubsystemContext for TestSubsystemContext<M, Spawner>
where
	M: std::fmt::Debug + Send + 'static,
	M: overseer::AssociateOutgoing + std::fmt::Debug + Send + 'static,
	AllMessages: From<<M as overseer::AssociateOutgoing>::OutgoingMessages>,
	AllMessages: From<M>,
	S: SpawnNamed + Send + 'static,
	Spawner: SpawnNamed + Send + 'static,
{
	type Message = M;
	type Sender = TestSubsystemSender;
	type Signal = OverseerSignal;
	type AllMessages = AllMessages;
	type OutgoingMessages = <M as overseer::AssociateOutgoing>::OutgoingMessages;
	type Error = SubsystemError;

	async fn try_recv(&mut self) -> Result<Option<FromOverseer<M>>, ()> {
@@ -316,8 +318,13 @@ pub struct ForwardSubsystem<M>(pub mpsc::Sender<M>);

impl<M, Context> overseer::Subsystem<Context, SubsystemError> for ForwardSubsystem<M>
where
	M: std::fmt::Debug + Send + 'static,
	Context: SubsystemContext<Message = M> + overseer::SubsystemContext<Message = M>,
	M: overseer::AssociateOutgoing + std::fmt::Debug + Send + 'static,
	Context: overseer::SubsystemContext<
		Message = M,
		Signal = OverseerSignal,
		Error = SubsystemError,
		OutgoingMessages = <M as overseer::AssociateOutgoing>::OutgoingMessages,
	>,
{
	fn start(mut self, mut ctx: Context) -> SpawnedSubsystem {
		let future = Box::pin(async move {

@@ -39,7 +39,7 @@ pub async fn determine_new_blocks<E, Sender>(
	lower_bound_number: BlockNumber,
) -> Result<Vec<(Hash, Header)>, E>
where
	Sender: SubsystemSender,
	Sender: SubsystemSender<ChainApiMessage>,
{
	const ANCESTRY_STEP: usize = 4;

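This is the pattern the rest of the diff repeats: helpers spell out the one message type they emit, so their callers need nothing broader. A hypothetical helper in the same shape as `determine_new_blocks`; the response-channel layout of `ChainApiMessage::BlockHeader` is an assumption of this sketch:

```rust
/// Hypothetical: fetch a header, needing only the right to send ChainApiMessage.
async fn fetch_header<Sender>(sender: &mut Sender, hash: Hash) -> Option<Header>
where
	Sender: SubsystemSender<ChainApiMessage>,
{
	let (tx, rx) = oneshot::channel();
	sender.send_message(ChainApiMessage::BlockHeader(hash, tx)).await;
	match rx.await {
		Ok(Ok(maybe_header)) => maybe_header,
		_ => None, // channel closed, or the chain API reported an error
	}
}
```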
@@ -26,9 +26,7 @@

use polkadot_node_subsystem::{
	errors::{RuntimeApiError, SubsystemError},
	messages::{
		AllMessages, BoundToRelayParent, RuntimeApiMessage, RuntimeApiRequest, RuntimeApiSender,
	},
	messages::{BoundToRelayParent, RuntimeApiMessage, RuntimeApiRequest, RuntimeApiSender},
	overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem,
	SubsystemContext, SubsystemSender,
};
@@ -144,7 +142,7 @@ pub async fn request_from_runtime<RequestBuilder, Response, Sender>(
) -> RuntimeApiReceiver<Response>
where
	RequestBuilder: FnOnce(RuntimeApiSender<Response>) -> RuntimeApiRequest,
	Sender: SubsystemSender,
	Sender: SubsystemSender<RuntimeApiMessage>,
{
	let (tx, rx) = oneshot::channel();

@@ -176,7 +174,7 @@ macro_rules! specialize_requests {
	$(
		$param_name: $param_ty,
	)*
	sender: &mut impl SubsystemSender,
	sender: &mut impl overseer::SubsystemSender<RuntimeApiMessage>,
) -> RuntimeApiReceiver<$return_ty>
{
	request_from_runtime(parent, sender, |tx| RuntimeApiRequest::$request_variant(
@@ -329,11 +327,14 @@ pub struct Validator {

impl Validator {
	/// Get a struct representing this node's validator if this node is in fact a validator in the context of the given block.
	pub async fn new(
	pub async fn new<S>(
		parent: Hash,
		keystore: SyncCryptoStorePtr,
		sender: &mut impl SubsystemSender,
	) -> Result<Self, Error> {
		sender: &mut S,
	) -> Result<Self, Error>
	where
		S: SubsystemSender<RuntimeApiMessage>,
	{
		// Note: request_validators and request_session_index_for_child do not and cannot
		// run concurrently: they both have a mutable handle to the same sender.
		// However, each of them returns a oneshot::Receiver, and those are resolved concurrently.
@@ -397,14 +398,14 @@ impl Drop for AbortOnDrop {
}

/// A `JobHandle` manages a particular job for a subsystem.
struct JobHandle<ToJob> {
struct JobHandle<Consumes> {
	_abort_handle: AbortOnDrop,
	to_job: mpsc::Sender<ToJob>,
	to_job: mpsc::Sender<Consumes>,
}

impl<ToJob> JobHandle<ToJob> {
impl<Consumes> JobHandle<Consumes> {
	/// Send a message to the job.
	async fn send_msg(&mut self, msg: ToJob) -> Result<(), Error> {
	async fn send_msg(&mut self, msg: Consumes) -> Result<(), Error> {
		self.to_job.send(msg).await.map_err(Into::into)
	}
}
@@ -418,49 +419,25 @@ pub enum FromJobCommand {
}

/// A sender for messages from jobs, as well as commands to the overseer.
pub struct JobSender<S: SubsystemSender> {
pub struct JobSender<S> {
	sender: S,
	from_job: mpsc::Sender<FromJobCommand>,
}

// A custom clone impl, since M does not need to impl `Clone`
// which `#[derive(Clone)]` requires.
impl<S: SubsystemSender> Clone for JobSender<S> {
impl<S: Clone> Clone for JobSender<S> {
	fn clone(&self) -> Self {
		Self { sender: self.sender.clone(), from_job: self.from_job.clone() }
	}
}

impl<S: SubsystemSender> JobSender<S> {
impl<S> JobSender<S> {
	/// Get access to the underlying subsystem sender.
	pub fn subsystem_sender(&mut self) -> &mut S {
		&mut self.sender
	}

	/// Send a direct message to some other `Subsystem`, routed based on message type.
	pub async fn send_message(&mut self, msg: impl Into<AllMessages>) {
		self.sender.send_message(msg.into()).await
	}

	/// Send multiple direct messages to other `Subsystem`s, routed based on message type.
	pub async fn send_messages<T, M>(&mut self, msgs: T)
	where
		T: IntoIterator<Item = M> + Send,
		T::IntoIter: Send,
		M: Into<AllMessages>,
	{
		self.sender.send_messages(msgs.into_iter().map(|m| m.into())).await
	}

	/// Send a message onto the unbounded queue of some other `Subsystem`, routed based on message
	/// type.
	///
	/// This function should be used only when there is some other bounding factor on the messages
	/// sent with it. Otherwise, it risks a memory leak.
	pub fn send_unbounded_message(&mut self, msg: impl Into<AllMessages>) {
		self.sender.send_unbounded_message(msg.into())
	}

	/// Send a command to the subsystem, to be relayed onwards to the overseer.
	pub async fn send_command(&mut self, msg: FromJobCommand) -> Result<(), mpsc::SendError> {
		self.from_job.send(msg).await
@@ -470,23 +447,23 @@ impl<S: SubsystemSender> JobSender<S> {
#[async_trait::async_trait]
impl<S, M> overseer::SubsystemSender<M> for JobSender<S>
where
	M: Send + 'static + Into<AllMessages>,
	S: SubsystemSender + Clone,
	M: Send + 'static,
	S: SubsystemSender<M> + Clone,
{
	async fn send_message(&mut self, msg: M) {
		self.sender.send_message(msg.into()).await
		self.sender.send_message(msg).await
	}

	async fn send_messages<T>(&mut self, msgs: T)
	async fn send_messages<I>(&mut self, msgs: I)
	where
		T: IntoIterator<Item = M> + Send,
		T::IntoIter: Send,
		I: IntoIterator<Item = M> + Send,
		I::IntoIter: Send,
	{
		self.sender.send_messages(msgs.into_iter().map(|m| m.into())).await
		self.sender.send_messages(msgs).await
	}

	fn send_unbounded_message(&mut self, msg: M) {
		self.sender.send_unbounded_message(msg.into())
		self.sender.send_unbounded_message(msg)
	}
}
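Net effect of the `JobSender` rework: it no longer lifts every message into `AllMessages` itself; it forwards untouched to whatever narrower `SubsystemSender<M>` it wraps, so a job inherits exactly the send rights of its subsystem. Inside a job that may issue runtime-API requests, use stays one call (a sketch; the surrounding job body and channel plumbing are elided):

```rust
// `sender: JobSender<S>` where S: SubsystemSender<RuntimeApiMessage>.
let (tx, _rx) = oneshot::channel();
sender
	.send_message(RuntimeApiMessage::Request(relay_parent, RuntimeApiRequest::Validators(tx)))
	.await;
```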
|
||||
@@ -506,6 +483,14 @@ impl fmt::Debug for FromJobCommand {
|
||||
pub trait JobTrait: Unpin + Sized {
|
||||
/// Message type used to send messages to the job.
|
||||
type ToJob: 'static + BoundToRelayParent + Send;
|
||||
|
||||
/// The set of outgoing messages to be accumulated into.
|
||||
type OutgoingMessages: 'static + Send;
|
||||
|
||||
/// The sender to send outgoing messages.
|
||||
// The trait bounds are rather minimal.
|
||||
type Sender: 'static + Send + Clone;
|
||||
|
||||
/// Job runtime error.
|
||||
type Error: 'static + std::error::Error + Send;
|
||||
/// Extra arguments this job needs to run properly.
|
||||
@@ -525,12 +510,12 @@ pub trait JobTrait: Unpin + Sized {
|
||||
/// Run a job for the given relay `parent`.
|
||||
///
|
||||
/// The job should be ended when `receiver` returns `None`.
|
||||
fn run<S: SubsystemSender>(
|
||||
fn run(
|
||||
leaf: ActivatedLeaf,
|
||||
run_args: Self::RunArgs,
|
||||
metrics: Self::Metrics,
|
||||
receiver: mpsc::Receiver<Self::ToJob>,
|
||||
sender: JobSender<S>,
|
||||
sender: JobSender<Self::Sender>,
|
||||
) -> Pin<Box<dyn Future<Output = Result<(), Self::Error>> + Send>>;
|
||||
}
|
||||
|
||||
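A job therefore pins down its sender and outgoing set as associated types instead of accepting an arbitrary `S: SubsystemSender` at `run` time. Compressed to its skeleton (all `My*` names hypothetical; remaining trait items elided):

```rust
impl JobTrait for MyJob {
	type ToJob = MyConsumedMessage;             // what the job receives
	type OutgoingMessages = MyOutgoingMessages; // the generated outgoing enum
	type Sender = MySender;                     // fixed up front, not a free parameter
	type Error = MyError;
	type RunArgs = ();
	type Metrics = ();

	fn run(
		leaf: ActivatedLeaf,
		_run_args: Self::RunArgs,
		_metrics: Self::Metrics,
		receiver: mpsc::Receiver<Self::ToJob>,
		sender: JobSender<Self::Sender>,
	) -> Pin<Box<dyn Future<Output = Result<(), Self::Error>> + Send>> {
		// ...spawn the job's main loop here (sketch only)...
		unimplemented!()
	}
}
```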
@@ -572,15 +557,14 @@ where
}

/// Spawn a new job for this `parent_hash`, with whatever args are appropriate.
fn spawn_job<Job, Sender>(
fn spawn_job<Job>(
	&mut self,
	leaf: ActivatedLeaf,
	run_args: Job::RunArgs,
	metrics: Job::Metrics,
	sender: Sender,
	sender: Job::Sender,
) where
	Job: JobTrait<ToJob = ToJob>,
	Sender: SubsystemSender,
{
	let hash = leaf.hash;
	let (to_job_tx, to_job_rx) = mpsc::channel(JOB_CHANNEL_CAPACITY);
@@ -697,8 +681,12 @@ impl<Job: JobTrait, Spawner> JobSubsystem<Job, Spawner> {
pub async fn run<Context>(self, mut ctx: Context)
where
	Spawner: SpawnNamed + Send + Clone + Unpin + 'static,
	Context: SubsystemContext<Message = <Job as JobTrait>::ToJob, Signal = OverseerSignal>,
	<Context as SubsystemContext>::Sender: SubsystemSender,
	Context: SubsystemContext<
		Message = <Job as JobTrait>::ToJob,
		OutgoingMessages = <Job as JobTrait>::OutgoingMessages,
		Sender = <Job as JobTrait>::Sender,
		Signal = OverseerSignal,
	>,
	Job: 'static + JobTrait + Send,
	<Job as JobTrait>::RunArgs: Clone + Sync,
	<Job as JobTrait>::ToJob:
@@ -719,7 +707,7 @@ impl<Job: JobTrait, Spawner> JobSubsystem<Job, Spawner> {
	}))) => {
		for activated in activated {
			let sender = ctx.sender().clone();
			jobs.spawn_job::<Job, _>(
			jobs.spawn_job::<Job>(
				activated,
				run_args.clone(),
				metrics.clone(),
@@ -773,11 +761,15 @@ impl<Job: JobTrait, Spawner> JobSubsystem<Job, Spawner> {
impl<Context, Job, Spawner> Subsystem<Context, SubsystemError> for JobSubsystem<Job, Spawner>
where
	Spawner: SpawnNamed + Send + Clone + Unpin + 'static,
	Context: SubsystemContext<Message = Job::ToJob, Signal = OverseerSignal>,
	Context: SubsystemContext<
		Message = Job::ToJob,
		Signal = OverseerSignal,
		OutgoingMessages = <Job as JobTrait>::OutgoingMessages,
		Sender = <Job as JobTrait>::Sender,
	>,
	Job: 'static + JobTrait + Send,
	Job::RunArgs: Clone + Sync,
	<Job as JobTrait>::ToJob:
		Sync + From<<Context as polkadot_overseer::SubsystemContext>::Message>,
	<Job as JobTrait>::ToJob: Sync + From<<Context as SubsystemContext>::Message>,
	Job::Metrics: Sync,
{
	fn start(self, ctx: Context) -> SpawnedSubsystem {

@@ -26,7 +26,7 @@ use futures::channel::oneshot;
use polkadot_node_subsystem::{
	errors::RuntimeApiError,
	messages::{RuntimeApiMessage, RuntimeApiRequest},
	overseer, SubsystemContext,
	overseer,
};
use thiserror::Error;

@@ -94,16 +94,19 @@ pub struct RollingSessionWindow {

impl RollingSessionWindow {
	/// Initialize a new session info cache with the given window size.
	pub async fn new(
		ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
	pub async fn new<Sender>(
		mut sender: Sender,
		window_size: SessionWindowSize,
		block_hash: Hash,
	) -> Result<Self, SessionsUnavailable> {
		let session_index = get_session_index_for_child(ctx, block_hash).await?;
	) -> Result<Self, SessionsUnavailable>
	where
		Sender: overseer::SubsystemSender<RuntimeApiMessage>,
	{
		let session_index = get_session_index_for_child(&mut sender, block_hash).await?;

		let window_start = session_index.saturating_sub(window_size.get() - 1);

		match load_all_sessions(ctx, block_hash, window_start, session_index).await {
		match load_all_sessions(&mut sender, block_hash, window_start, session_index).await {
			Err(kind) => Err(SessionsUnavailable {
				kind,
				info: Some(SessionsUnavailableInfo {
@@ -154,10 +157,10 @@ impl RollingSessionWindow {
	/// some backwards drift in session index is acceptable.
	pub async fn cache_session_info_for_head(
		&mut self,
		ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
		sender: &mut impl overseer::SubsystemSender<RuntimeApiMessage>,
		block_hash: Hash,
	) -> Result<SessionWindowUpdate, SessionsUnavailable> {
		let session_index = get_session_index_for_child(ctx, block_hash).await?;
		let session_index = get_session_index_for_child(sender, block_hash).await?;

		let old_window_start = self.earliest_session;

@@ -177,7 +180,7 @@ impl RollingSessionWindow {

		let fresh_start = if latest < window_start { window_start } else { latest + 1 };

		match load_all_sessions(ctx, block_hash, fresh_start, session_index).await {
		match load_all_sessions(sender, block_hash, fresh_start, session_index).await {
			Err(kind) => Err(SessionsUnavailable {
				kind,
				info: Some(SessionsUnavailableInfo {
@@ -215,17 +218,18 @@ impl RollingSessionWindow {
// cleaner to just call the runtime API directly without needing to create an instance
// of `RuntimeInfo`.
async fn get_session_index_for_child(
	ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
	sender: &mut impl overseer::SubsystemSender<RuntimeApiMessage>,
	block_hash: Hash,
) -> Result<SessionIndex, SessionsUnavailable> {
	let (s_tx, s_rx) = oneshot::channel();

	// We're requesting session index of a child to populate the cache in advance.
	ctx.send_message(RuntimeApiMessage::Request(
		block_hash,
		RuntimeApiRequest::SessionIndexForChild(s_tx),
	))
	.await;
	sender
		.send_message(RuntimeApiMessage::Request(
			block_hash,
			RuntimeApiRequest::SessionIndexForChild(s_tx),
		))
		.await;

	match s_rx.await {
		Ok(Ok(s)) => Ok(s),
@@ -243,7 +247,7 @@ async fn get_session_index_for_child(
}

async fn load_all_sessions(
	ctx: &mut (impl SubsystemContext + overseer::SubsystemContext),
	sender: &mut impl overseer::SubsystemSender<RuntimeApiMessage>,
	block_hash: Hash,
	start: SessionIndex,
	end_inclusive: SessionIndex,
@@ -251,11 +255,12 @@ async fn load_all_sessions(
	let mut v = Vec::new();
	for i in start..=end_inclusive {
		let (tx, rx) = oneshot::channel();
		ctx.send_message(RuntimeApiMessage::Request(
			block_hash,
			RuntimeApiRequest::SessionInfo(i, tx),
		))
		.await;
		sender
			.send_message(RuntimeApiMessage::Request(
				block_hash,
				RuntimeApiRequest::SessionInfo(i, tx),
			))
			.await;

		let session_info = match rx.await {
			Ok(Ok(Some(s))) => s,
@@ -274,7 +279,10 @@ async fn load_all_sessions(
mod tests {
	use super::*;
	use assert_matches::assert_matches;
	use polkadot_node_subsystem::messages::{AllMessages, AvailabilityRecoveryMessage};
	use polkadot_node_subsystem::{
		messages::{AllMessages, AvailabilityRecoveryMessage},
		SubsystemContext,
	};
	use polkadot_node_subsystem_test_helpers::make_subsystem_context;
	use polkadot_primitives::v2::Header;
	use sp_core::testing::TaskExecutor;
@@ -319,13 +327,16 @@ mod tests {

	let hash = header.hash();

	let sender = ctx.sender();

	let test_fut = {
		Box::pin(async move {
			let window = match window {
				None =>
					RollingSessionWindow::new(&mut ctx, TEST_WINDOW_SIZE, hash).await.unwrap(),
				None => RollingSessionWindow::new(sender.clone(), TEST_WINDOW_SIZE, hash)
					.await
					.unwrap(),
				Some(mut window) => {
					window.cache_session_info_for_head(&mut ctx, hash).await.unwrap();
					window.cache_session_info_for_head(sender, hash).await.unwrap();
					window
				},
			};
@@ -495,8 +506,9 @@ mod tests {
	let hash = header.hash();

	let test_fut = {
		let sender = ctx.sender().clone();
		Box::pin(async move {
			let res = RollingSessionWindow::new(&mut ctx, TEST_WINDOW_SIZE, hash).await;
			let res = RollingSessionWindow::new(sender, TEST_WINDOW_SIZE, hash).await;
			assert!(res.is_err());
		})
	};
@@ -555,8 +567,9 @@ mod tests {

	let test_fut = {
		Box::pin(async move {
			let sender = ctx.sender().clone();
			let window =
				RollingSessionWindow::new(&mut ctx, TEST_WINDOW_SIZE, hash).await.unwrap();
				RollingSessionWindow::new(sender, TEST_WINDOW_SIZE, hash).await.unwrap();

			assert_eq!(window.earliest_session, session);
			assert_eq!(window.session_info, vec![dummy_session_info(session)]);

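For callers the practical difference is that the window now borrows only a runtime-API sender, not the whole subsystem context, so it can live alongside other uses of the context. A usage sketch (window size and hash names as in the tests above; error handling via `?` assumed):

```rust
// `sender` need only implement overseer::SubsystemSender<RuntimeApiMessage>.
let mut window =
	RollingSessionWindow::new(sender.clone(), TEST_WINDOW_SIZE, block_hash).await?;

// Later, on every new head:
window.cache_session_info_for_head(&mut sender, new_head_hash).await?;
```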
@@ -25,7 +25,7 @@ use sp_application_crypto::AppKey;
use sp_core::crypto::ByteArray;
use sp_keystore::{CryptoStore, SyncCryptoStorePtr};

use polkadot_node_subsystem::{SubsystemContext, SubsystemSender};
use polkadot_node_subsystem::{messages::RuntimeApiMessage, overseer, SubsystemSender};
use polkadot_primitives::v2::{
	CandidateEvent, CoreState, EncodeAs, GroupIndex, GroupRotationInfo, Hash, OccupiedCore,
	ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, SigningContext, UncheckedSigned,
@@ -123,7 +123,7 @@ impl RuntimeInfo {
	parent: Hash,
) -> Result<SessionIndex>
where
	Sender: SubsystemSender,
	Sender: SubsystemSender<RuntimeApiMessage>,
{
	match self.session_index_cache.get(&parent) {
		Some(index) => Ok(*index),
@@ -143,7 +143,7 @@ impl RuntimeInfo {
	relay_parent: Hash,
) -> Result<&'a ExtendedSessionInfo>
where
	Sender: SubsystemSender,
	Sender: SubsystemSender<RuntimeApiMessage>,
{
	let session_index = self.get_session_index_for_child(sender, relay_parent).await?;

@@ -161,7 +161,7 @@ impl RuntimeInfo {
	session_index: SessionIndex,
) -> Result<&'a ExtendedSessionInfo>
where
	Sender: SubsystemSender,
	Sender: SubsystemSender<RuntimeApiMessage>,
{
	if !self.session_info_cache.contains(&session_index) {
		let session_info =
@@ -190,7 +190,7 @@ impl RuntimeInfo {
	std::result::Result<Signed<Payload, RealPayload>, UncheckedSigned<Payload, RealPayload>>,
>
where
	Sender: SubsystemSender,
	Sender: SubsystemSender<RuntimeApiMessage>,
	Payload: EncodeAs<RealPayload> + Clone,
	RealPayload: Encode + Clone,
{
@@ -257,25 +257,25 @@ where
}

/// Request availability cores from the runtime.
pub async fn get_availability_cores<Context>(
	ctx: &mut Context,
pub async fn get_availability_cores<Sender>(
	sender: &mut Sender,
	relay_parent: Hash,
) -> Result<Vec<CoreState>>
where
	Context: SubsystemContext,
	Sender: overseer::SubsystemSender<RuntimeApiMessage>,
{
	recv_runtime(request_availability_cores(relay_parent, ctx.sender()).await).await
	recv_runtime(request_availability_cores(relay_parent, sender).await).await
}

/// Variant of `request_availability_cores` that only returns occupied ones.
pub async fn get_occupied_cores<Context>(
	ctx: &mut Context,
pub async fn get_occupied_cores<Sender>(
	sender: &mut Sender,
	relay_parent: Hash,
) -> Result<Vec<OccupiedCore>>
where
	Context: SubsystemContext,
	Sender: overseer::SubsystemSender<RuntimeApiMessage>,
{
	let cores = get_availability_cores(ctx, relay_parent).await?;
	let cores = get_availability_cores(sender, relay_parent).await?;

	Ok(cores
		.into_iter()
@@ -290,17 +290,16 @@ where
}

/// Get group rotation info based on the given `relay_parent`.
pub async fn get_group_rotation_info<Context>(
	ctx: &mut Context,
pub async fn get_group_rotation_info<Sender>(
	sender: &mut Sender,
	relay_parent: Hash,
) -> Result<GroupRotationInfo>
where
	Context: SubsystemContext,
	Sender: overseer::SubsystemSender<RuntimeApiMessage>,
{
	// We drop `groups` here as we don't need them, because of `RuntimeInfo`. Ideally we would not
	// fetch them in the first place.
	let (_, info) =
		recv_runtime(request_validator_groups(relay_parent, ctx.sender()).await).await?;
	let (_, info) = recv_runtime(request_validator_groups(relay_parent, sender).await).await?;
	Ok(info)
}

@@ -310,7 +309,7 @@ pub async fn get_candidate_events<Sender>(
	relay_parent: Hash,
) -> Result<Vec<CandidateEvent>>
where
	Sender: SubsystemSender,
	Sender: SubsystemSender<RuntimeApiMessage>,
{
	recv_runtime(request_candidate_events(relay_parent, sender).await).await
}
@@ -321,7 +320,7 @@ pub async fn get_on_chain_votes<Sender>(
	relay_parent: Hash,
) -> Result<Option<ScrapedOnChainVotes>>
where
	Sender: SubsystemSender,
	Sender: SubsystemSender<RuntimeApiMessage>,
{
	recv_runtime(request_on_chain_votes(relay_parent, sender).await).await
}
@@ -333,7 +332,7 @@ pub async fn get_validation_code_by_hash<Sender>(
	validation_code_hash: ValidationCodeHash,
) -> Result<Option<ValidationCode>>
where
	Sender: SubsystemSender,
	Sender: SubsystemSender<RuntimeApiMessage>,
{
	recv_runtime(request_validation_code_by_hash(relay_parent, validation_code_hash, sender).await)
		.await

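Call sites of these helpers change mechanically: pass a sender instead of the context. For instance, inside an async fn holding a `sender` that implements `overseer::SubsystemSender<RuntimeApiMessage>` (error handling abbreviated):

```rust
// Before: get_occupied_cores(&mut ctx, relay_parent).await?
// After: any sufficiently narrow sender will do.
let occupied = get_occupied_cores(&mut sender, relay_parent).await?;
for core in occupied {
	// each `OccupiedCore` names the para currently occupying a scheduler core
}
```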
@@ -14,6 +14,8 @@
// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

#![cfg(test)]

use super::*;
use assert_matches::assert_matches;
use executor::block_on;
@@ -44,8 +46,9 @@ use thiserror::Error;

// job structs are constructed within JobTrait::run
// most will want to retain the sender and receiver, as well as whatever other data they like
struct FakeCollatorProtocolJob {
struct FakeCollatorProtocolJob<Sender> {
	receiver: mpsc::Receiver<CollatorProtocolMessage>,
	_phantom: std::marker::PhantomData<Sender>,
}

// Error will mostly be a wrapper to make the try operator more convenient;
@@ -57,8 +60,18 @@ enum Error {
	Sending(#[from] mpsc::SendError),
}

impl JobTrait for FakeCollatorProtocolJob {
impl<Sender> JobTrait for FakeCollatorProtocolJob<Sender>
where
	Sender: overseer::CollatorProtocolSenderTrait
		+ std::marker::Unpin
		+ overseer::SubsystemSender<CollatorProtocolMessage>,
	JobSender<Sender>: overseer::CollatorProtocolSenderTrait
		+ std::marker::Unpin
		+ overseer::SubsystemSender<CollatorProtocolMessage>,
{
	type ToJob = CollatorProtocolMessage;
	type OutgoingMessages = overseer::CollatorProtocolOutgoingMessages;
	type Sender = Sender;
	type Error = Error;
	type RunArgs = bool;
	type Metrics = ();
@@ -68,20 +81,21 @@ impl JobTrait for FakeCollatorProtocolJob {
	/// Run a job for the parent block indicated
	//
	// this function is in charge of creating and executing the job's main loop
	fn run<S: SubsystemSender>(
	fn run(
		_: ActivatedLeaf,
		run_args: Self::RunArgs,
		_metrics: Self::Metrics,
		receiver: mpsc::Receiver<CollatorProtocolMessage>,
		mut sender: JobSender<S>,
		mut sender: JobSender<Sender>,
	) -> Pin<Box<dyn Future<Output = Result<(), Self::Error>> + Send>> {
		async move {
			let job = FakeCollatorProtocolJob { receiver };
			let job =
				FakeCollatorProtocolJob { receiver, _phantom: std::marker::PhantomData::<Sender> };

			if run_args {
				sender
					.send_message(CollatorProtocolMessage::Invalid(
						Default::default(),
						dummy_hash(),
						dummy_candidate_receipt(dummy_hash()),
					))
					.await;
@@ -95,7 +109,10 @@ impl JobTrait for FakeCollatorProtocolJob {
	}
}

impl FakeCollatorProtocolJob {
impl<Sender> FakeCollatorProtocolJob<Sender>
where
	Sender: overseer::CollatorProtocolSenderTrait,
{
	async fn run_loop(mut self) -> Result<(), Error> {
		loop {
			match self.receiver.next().await {
@@ -111,7 +128,8 @@ impl FakeCollatorProtocolJob {
}

// with the job defined, it's straightforward to get a subsystem implementation.
type FakeCollatorProtocolSubsystem<Spawner> = JobSubsystem<FakeCollatorProtocolJob, Spawner>;
type FakeCollatorProtocolSubsystem<Spawner> =
	JobSubsystem<FakeCollatorProtocolJob<test_helpers::TestSubsystemSender>, Spawner>;

// this type lets us pretend to be the overseer
type OverseerHandle = test_helpers::TestSubsystemContextHandle<CollatorProtocolMessage>;

@@ -24,9 +24,7 @@
pub use jaeger::*;
pub use polkadot_node_jaeger as jaeger;

pub use polkadot_overseer::{
	self as overseer, ActiveLeavesUpdate, OverseerConnector, OverseerSignal,
};
pub use polkadot_overseer::{self as overseer, *};

pub use polkadot_node_subsystem_types::{
	errors::{self, *},
@@ -54,37 +52,5 @@ pub type FromOverseer<M> = polkadot_overseer::gen::FromOverseer<M, OverseerSigna
pub type SubsystemInstance<Message> =
	polkadot_overseer::gen::SubsystemInstance<Message, OverseerSignal>;

/// Sender trait for the `AllMessages` wrapper.
pub trait SubsystemSender: polkadot_overseer::gen::SubsystemSender<messages::AllMessages> {}

impl<T> SubsystemSender for T where T: polkadot_overseer::gen::SubsystemSender<messages::AllMessages>
{}

/// Spawned subsystem.
pub type SpawnedSubsystem = polkadot_overseer::gen::SpawnedSubsystem<SubsystemError>;

/// Convenience trait specialization.
pub trait SubsystemContext:
	polkadot_overseer::gen::SubsystemContext<
		Signal = OverseerSignal,
		AllMessages = messages::AllMessages,
		Error = SubsystemError,
	>
{
	/// The message type the subsystem consumes.
	type Message: std::fmt::Debug + Send + 'static;
	/// Sender type to communicate with other subsystems.
	type Sender: SubsystemSender + Send + Clone + 'static;
}

impl<T> SubsystemContext for T
where
	T: polkadot_overseer::gen::SubsystemContext<
		Signal = OverseerSignal,
		AllMessages = messages::AllMessages,
		Error = SubsystemError,
	>,
{
	type Message = <Self as polkadot_overseer::gen::SubsystemContext>::Message;
	type Sender = <Self as polkadot_overseer::gen::SubsystemContext>::Sender;
}

Some files were not shown because too many files have changed in this diff.