feat: initialize Kurdistan SDK - independent fork of Polkadot SDK
This commit is contained in:
@@ -0,0 +1,42 @@
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// This file is part of Pezkuwi.
|
||||
|
||||
// Pezkuwi is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Pezkuwi is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use pezkuwi_primitives::CommittedCandidateReceiptError;
|
||||
use thiserror::Error;
|
||||
|
||||
/// All errors the collation-generation subsystem can produce.
#[derive(Debug, Error)]
pub enum Error {
	/// Error originating in the subsystem infrastructure (overseer / context).
	#[error(transparent)]
	Subsystem(#[from] pezkuwi_node_subsystem::SubsystemError),
	/// A oneshot response channel was dropped before answering.
	#[error(transparent)]
	OneshotRecv(#[from] futures::channel::oneshot::Canceled),
	/// A runtime API request failed.
	#[error(transparent)]
	Runtime(#[from] pezkuwi_node_subsystem::errors::RuntimeApiError),
	/// Error from the subsystem utility crate.
	#[error(transparent)]
	Util(#[from] pezkuwi_node_subsystem_util::Error),
	/// Error from the utility crate's runtime helpers.
	#[error(transparent)]
	UtilRuntime(#[from] pezkuwi_node_subsystem_util::runtime::Error),
	/// Erasure coding of the available data failed.
	#[error(transparent)]
	Erasure(#[from] pezkuwi_erasure_coding::Error),
	/// A collation was submitted before `Initialize` configured the subsystem.
	#[error("Collation submitted before initialization")]
	SubmittedBeforeInit,
	/// The produced candidate receipt failed the V2 UMP-signal / core-index check.
	#[error("V2 core index check failed: {0}")]
	CandidateReceiptCheck(CommittedCandidateReceiptError),
	/// The compressed PoV was larger than the relay chain's `max_pov_size`.
	#[error("PoV size {0} exceeded maximum size of {1}")]
	POVSizeExceeded(usize, usize),
}

/// Convenience alias for results within this subsystem.
pub type Result<T> = std::result::Result<T, Error>;
||||
@@ -0,0 +1,637 @@
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// This file is part of Pezkuwi.
|
||||
|
||||
// Pezkuwi is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Pezkuwi is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! The collation generation subsystem is the interface between pezkuwi and the collators.
|
||||
//!
|
||||
//! # Protocol
|
||||
//!
|
||||
//! On every `ActiveLeavesUpdate`:
|
||||
//!
|
||||
//! * If there is no collation generation config, ignore.
|
||||
//! * Otherwise, for each `activated` head in the update:
|
||||
//! * Determine if the para is scheduled on any core by fetching the `availability_cores` Runtime
|
||||
//! API.
|
||||
//! * Use the Runtime API subsystem to fetch the full validation data.
|
||||
//! * Invoke the `collator`, and use its outputs to produce a
|
||||
//! [`pezkuwi_primitives::CandidateReceiptV2`], signed with the configuration's `key`.
|
||||
//! * Dispatch a [`CollatorProtocolMessage::DistributeCollation`]`(receipt, pov)`.
|
||||
|
||||
#![deny(missing_docs)]
|
||||
|
||||
use codec::Encode;
|
||||
use error::{Error, Result};
|
||||
use futures::{channel::oneshot, future::FutureExt, select};
|
||||
use pezkuwi_node_primitives::{
|
||||
AvailableData, Collation, CollationGenerationConfig, CollationSecondedSignal, PoV,
|
||||
SubmitCollationParams,
|
||||
};
|
||||
use pezkuwi_node_subsystem::{
|
||||
messages::{CollationGenerationMessage, CollatorProtocolMessage, RuntimeApiMessage},
|
||||
overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem,
|
||||
SubsystemContext, SubsystemError, SubsystemResult, SubsystemSender,
|
||||
};
|
||||
use pezkuwi_node_subsystem_util::{
|
||||
request_claim_queue, request_persisted_validation_data, request_session_index_for_child,
|
||||
request_validation_code_hash, request_validators, runtime::ClaimQueueSnapshot,
|
||||
};
|
||||
use pezkuwi_primitives::{
|
||||
transpose_claim_queue, CandidateCommitments, CandidateDescriptorV2,
|
||||
CommittedCandidateReceiptV2, CoreIndex, Hash, Id as ParaId, OccupiedCoreAssumption,
|
||||
PersistedValidationData, SessionIndex, TransposedClaimQueue, ValidationCodeHash,
|
||||
};
|
||||
use schnellru::{ByLength, LruMap};
|
||||
use std::{collections::HashSet, sync::Arc};
|
||||
|
||||
mod error;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
mod metrics;
|
||||
use self::metrics::Metrics;
|
||||
|
||||
/// Log target for all `gum` log lines emitted by this subsystem.
// NOTE: the explicit `'static` lifetime was redundant — it is implied on
// `const` string references (clippy: `redundant_static_lifetimes`).
const LOG_TARGET: &str = "teyrchain::collation-generation";
||||
|
||||
/// Collation Generation Subsystem
pub struct CollationGenerationSubsystem {
	// Collation configuration; `None` until a `CollationGenerationMessage::Initialize`
	// is received. All other activity is a no-op (or an error) before that.
	config: Option<Arc<CollationGenerationConfig>>,
	// Small LRU cache of per-session data (currently the validator-set size).
	session_info_cache: SessionInfoCache,
	// Prometheus metrics for this subsystem.
	metrics: Metrics,
}
||||
|
||||
#[overseer::contextbounds(CollationGeneration, prefix = self::overseer)]
|
||||
impl CollationGenerationSubsystem {
|
||||
/// Create a new instance of the `CollationGenerationSubsystem`.
|
||||
pub fn new(metrics: Metrics) -> Self {
|
||||
Self { config: None, metrics, session_info_cache: SessionInfoCache::new() }
|
||||
}
|
||||
|
||||
	/// Run this subsystem
	///
	/// Conceptually, this is very simple: it just loops forever.
	///
	/// On each iteration it pulls the next message from the overseer context and
	/// hands it to [`Self::handle_incoming`]. The loop exits when that handler
	/// returns `true` — i.e. on an `OverseerSignal::Conclude` or on a receive
	/// error from the context.
	async fn run<Context>(mut self, mut ctx: Context) {
		loop {
			select! {
				incoming = ctx.recv().fuse() => {
					if self.handle_incoming::<Context>(incoming, &mut ctx).await {
						break;
					}
				},
			}
		}
	}
||||
|
||||
	// handle an incoming message. return true if we should break afterwards.
	// note: this doesn't strictly need to be a separate function; it's more an administrative
	// function so that we don't clutter the run loop. It could in principle be inlined directly
	// into there. it should hopefully therefore be ok that it's an async function mutably borrowing
	// self.
	async fn handle_incoming<Context>(
		&mut self,
		incoming: SubsystemResult<FromOrchestra<<Context as SubsystemContext>::Message>>,
		ctx: &mut Context,
	) -> bool {
		match incoming {
			// New active leaves: attempt to build collations on top of each activated head.
			Ok(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate {
				activated,
				..
			}))) => {
				if let Err(err) = self.handle_new_activation(activated.map(|v| v.hash), ctx).await {
					gum::warn!(target: LOG_TARGET, err = ?err, "failed to handle new activation");
				}

				false
			},
			// Shutdown requested by the overseer.
			Ok(FromOrchestra::Signal(OverseerSignal::Conclude)) => true,
			// First-time configuration; re-initializing via this message is an error.
			Ok(FromOrchestra::Communication {
				msg: CollationGenerationMessage::Initialize(config),
			}) => {
				if self.config.is_some() {
					gum::error!(target: LOG_TARGET, "double initialization");
				} else {
					self.config = Some(Arc::new(config));
				}
				false
			},
			// Unconditional reconfiguration (overwrites any existing config).
			Ok(FromOrchestra::Communication {
				msg: CollationGenerationMessage::Reinitialize(config),
			}) => {
				self.config = Some(Arc::new(config));
				false
			},
			// Externally-built collation to be turned into a receipt and distributed.
			Ok(FromOrchestra::Communication {
				msg: CollationGenerationMessage::SubmitCollation(params),
			}) => {
				if let Err(err) = self.handle_submit_collation(params, ctx).await {
					gum::error!(target: LOG_TARGET, ?err, "Failed to submit collation");
				}

				false
			},
			// Finality notifications are irrelevant to collation building.
			Ok(FromOrchestra::Signal(OverseerSignal::BlockFinalized(..))) => false,
			// A receive error is fatal for this subsystem's loop.
			Err(err) => {
				gum::error!(
					target: LOG_TARGET,
					err = ?err,
					"error receiving message from subsystem context: {:?}",
					err
				);
				true
			},
		}
	}
||||
|
||||
	/// Turn an externally-built collation (`SubmitCollationParams`) into a candidate
	/// receipt and hand it to the collator protocol for distribution.
	///
	/// Fails with [`Error::SubmittedBeforeInit`] if no config has been installed yet.
	/// Returns `Ok(())` without distributing anything when the runtime has no
	/// persisted validation data for the para at this relay parent.
	async fn handle_submit_collation<Context>(
		&mut self,
		params: SubmitCollationParams,
		ctx: &mut Context,
	) -> Result<()> {
		let Some(config) = &self.config else {
			return Err(Error::SubmittedBeforeInit);
		};
		// Times the whole submission; the histogram records on drop.
		let _timer = self.metrics.time_submit_collation();

		let SubmitCollationParams {
			relay_parent,
			collation,
			parent_head,
			validation_code_hash,
			result_sender,
			core_index,
		} = params;

		// NOTE: the caller provides the parent head, so `TimedOut` is used here
		// (vs `Included` in `handle_new_activation`) — the parent-head field is
		// overwritten below either way.
		let mut validation_data = match request_persisted_validation_data(
			relay_parent,
			config.para_id,
			OccupiedCoreAssumption::TimedOut,
			ctx.sender(),
		)
		.await
		.await??
		{
			Some(v) => v,
			None => {
				gum::debug!(
					target: LOG_TARGET,
					relay_parent = ?relay_parent,
					our_para = %config.para_id,
					"No validation data for para - does it exist at this relay-parent?",
				);
				return Ok(());
			},
		};

		// We need to swap the parent-head data, but all other fields here will be correct.
		validation_data.parent_head = parent_head;

		let claim_queue = request_claim_queue(relay_parent, ctx.sender()).await.await??;

		let session_index =
			request_session_index_for_child(relay_parent, ctx.sender()).await.await??;

		let session_info =
			self.session_info_cache.get(relay_parent, session_index, ctx.sender()).await?;
		let collation = PreparedCollation {
			collation,
			relay_parent,
			para_id: config.para_id,
			validation_data,
			validation_code_hash,
			n_validators: session_info.n_validators,
			core_index,
			session_index,
		};

		construct_and_distribute_receipt(
			collation,
			ctx.sender(),
			result_sender,
			&mut self.metrics,
			&transpose_claim_queue(claim_queue),
		)
		.await?;

		Ok(())
	}
||||
|
||||
async fn handle_new_activation<Context>(
|
||||
&mut self,
|
||||
maybe_activated: Option<Hash>,
|
||||
ctx: &mut Context,
|
||||
) -> Result<()> {
|
||||
let Some(config) = &self.config else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
let Some(relay_parent) = maybe_activated else { return Ok(()) };
|
||||
|
||||
// If there is no collation function provided, bail out early.
|
||||
// Important: Lookahead collator and slot based collator do not use `CollatorFn`.
|
||||
if config.collator.is_none() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let para_id = config.para_id;
|
||||
|
||||
let _timer = self.metrics.time_new_activation();
|
||||
|
||||
let session_index =
|
||||
request_session_index_for_child(relay_parent, ctx.sender()).await.await??;
|
||||
|
||||
let session_info =
|
||||
self.session_info_cache.get(relay_parent, session_index, ctx.sender()).await?;
|
||||
let n_validators = session_info.n_validators;
|
||||
|
||||
let claim_queue =
|
||||
ClaimQueueSnapshot::from(request_claim_queue(relay_parent, ctx.sender()).await.await??);
|
||||
|
||||
let assigned_cores = claim_queue
|
||||
.iter_all_claims()
|
||||
.filter_map(|(core_idx, para_ids)| {
|
||||
para_ids.iter().any(|¶_id| para_id == config.para_id).then_some(*core_idx)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// Nothing to do if no core is assigned to us at any depth.
|
||||
if assigned_cores.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// We are being very optimistic here, but one of the cores could be pending availability
|
||||
// for some more blocks, or even time out. We assume all cores are being freed.
|
||||
|
||||
let mut validation_data = match request_persisted_validation_data(
|
||||
relay_parent,
|
||||
para_id,
|
||||
// Just use included assumption always. If there are no pending candidates it's a
|
||||
// no-op.
|
||||
OccupiedCoreAssumption::Included,
|
||||
ctx.sender(),
|
||||
)
|
||||
.await
|
||||
.await??
|
||||
{
|
||||
Some(v) => v,
|
||||
None => {
|
||||
gum::debug!(
|
||||
target: LOG_TARGET,
|
||||
relay_parent = ?relay_parent,
|
||||
our_para = %para_id,
|
||||
"validation data is not available",
|
||||
);
|
||||
return Ok(());
|
||||
},
|
||||
};
|
||||
|
||||
let validation_code_hash = match request_validation_code_hash(
|
||||
relay_parent,
|
||||
para_id,
|
||||
// Just use included assumption always. If there are no pending candidates it's a
|
||||
// no-op.
|
||||
OccupiedCoreAssumption::Included,
|
||||
ctx.sender(),
|
||||
)
|
||||
.await
|
||||
.await??
|
||||
{
|
||||
Some(v) => v,
|
||||
None => {
|
||||
gum::debug!(
|
||||
target: LOG_TARGET,
|
||||
relay_parent = ?relay_parent,
|
||||
our_para = %para_id,
|
||||
"validation code hash is not found.",
|
||||
);
|
||||
return Ok(());
|
||||
},
|
||||
};
|
||||
|
||||
let task_config = config.clone();
|
||||
let metrics = self.metrics.clone();
|
||||
let mut task_sender = ctx.sender().clone();
|
||||
|
||||
ctx.spawn(
|
||||
"chained-collation-builder",
|
||||
Box::pin(async move {
|
||||
let transposed_claim_queue = transpose_claim_queue(claim_queue.0.clone());
|
||||
|
||||
// Track used core indexes not to submit collations on the same core.
|
||||
let mut used_cores = HashSet::new();
|
||||
|
||||
for i in 0..assigned_cores.len() {
|
||||
// Get the collation.
|
||||
let collator_fn = match task_config.collator.as_ref() {
|
||||
Some(x) => x,
|
||||
None => return,
|
||||
};
|
||||
|
||||
let (collation, result_sender) =
|
||||
match collator_fn(relay_parent, &validation_data).await {
|
||||
Some(collation) => collation.into_inner(),
|
||||
None => {
|
||||
gum::debug!(
|
||||
target: LOG_TARGET,
|
||||
?para_id,
|
||||
"collator returned no collation on collate",
|
||||
);
|
||||
return;
|
||||
},
|
||||
};
|
||||
|
||||
// Use the core_selector method from CandidateCommitments to extract
|
||||
// CoreSelector and ClaimQueueOffset.
|
||||
let mut commitments = CandidateCommitments::default();
|
||||
commitments.upward_messages = collation.upward_messages.clone();
|
||||
|
||||
let ump_signals = match commitments.ump_signals() {
|
||||
Ok(signals) => signals,
|
||||
Err(err) => {
|
||||
gum::debug!(
|
||||
target: LOG_TARGET,
|
||||
?para_id,
|
||||
"error processing UMP signals: {}",
|
||||
err
|
||||
);
|
||||
return;
|
||||
},
|
||||
};
|
||||
|
||||
let (cs_index, cq_offset) = ump_signals
|
||||
.core_selector()
|
||||
.map(|(cs_index, cq_offset)| (cs_index.0 as usize, cq_offset.0 as usize))
|
||||
.unwrap_or((i, 0));
|
||||
|
||||
// Identify the cores to build collations on using the given claim queue offset.
|
||||
let cores_to_build_on = claim_queue
|
||||
.iter_claims_at_depth(cq_offset)
|
||||
.filter_map(|(core_idx, para_id)| {
|
||||
(para_id == task_config.para_id).then_some(core_idx)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
if cores_to_build_on.is_empty() {
|
||||
gum::debug!(
|
||||
target: LOG_TARGET,
|
||||
?para_id,
|
||||
"no core is assigned to para at depth {}",
|
||||
cq_offset,
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
let descriptor_core_index =
|
||||
cores_to_build_on[cs_index % cores_to_build_on.len()];
|
||||
|
||||
// Ensure the core index has not been used before.
|
||||
if used_cores.contains(&descriptor_core_index.0) {
|
||||
gum::warn!(
|
||||
target: LOG_TARGET,
|
||||
?para_id,
|
||||
"teyrchain repeatedly selected the same core index: {}",
|
||||
descriptor_core_index.0,
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
used_cores.insert(descriptor_core_index.0);
|
||||
gum::trace!(
|
||||
target: LOG_TARGET,
|
||||
?para_id,
|
||||
"selected core index: {}",
|
||||
descriptor_core_index.0,
|
||||
);
|
||||
|
||||
// Distribute the collation.
|
||||
let parent_head = collation.head_data.clone();
|
||||
if let Err(err) = construct_and_distribute_receipt(
|
||||
PreparedCollation {
|
||||
collation,
|
||||
para_id,
|
||||
relay_parent,
|
||||
validation_data: validation_data.clone(),
|
||||
validation_code_hash,
|
||||
n_validators,
|
||||
core_index: descriptor_core_index,
|
||||
session_index,
|
||||
},
|
||||
&mut task_sender,
|
||||
result_sender,
|
||||
&metrics,
|
||||
&transposed_claim_queue,
|
||||
)
|
||||
.await
|
||||
{
|
||||
gum::error!(
|
||||
target: LOG_TARGET,
|
||||
"Failed to construct and distribute collation: {}",
|
||||
err
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
// Chain the collations. All else stays the same as we build the chained
|
||||
// collation on same relay parent.
|
||||
validation_data.parent_head = parent_head;
|
||||
}
|
||||
}),
|
||||
)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[overseer::subsystem(CollationGeneration, error=SubsystemError, prefix=self::overseer)]
impl<Context> CollationGenerationSubsystem {
	// Box the run loop into a `SpawnedSubsystem` that the overseer drives.
	// The loop itself never errors; it exits cleanly on `Conclude`.
	fn start(self, ctx: Context) -> SpawnedSubsystem {
		let future = async move {
			self.run(ctx).await;
			Ok(())
		}
		.boxed();

		SpawnedSubsystem { name: "collation-generation-subsystem", future }
	}
}
|
||||
|
||||
/// Per-session data cached by the subsystem.
#[derive(Clone)]
struct PerSessionInfo {
	// Size of the session's validator set; needed to compute the number of
	// erasure-coding chunks for the candidate's erasure root.
	n_validators: usize,
}
||||
|
||||
// Small LRU cache of [`PerSessionInfo`] keyed by session index.
struct SessionInfoCache(LruMap<SessionIndex, PerSessionInfo>);
||||
|
||||
impl SessionInfoCache {
|
||||
fn new() -> Self {
|
||||
Self(LruMap::new(ByLength::new(2)))
|
||||
}
|
||||
|
||||
async fn get<Sender: SubsystemSender<RuntimeApiMessage>>(
|
||||
&mut self,
|
||||
relay_parent: Hash,
|
||||
session_index: SessionIndex,
|
||||
sender: &mut Sender,
|
||||
) -> Result<PerSessionInfo> {
|
||||
if let Some(info) = self.0.get(&session_index) {
|
||||
return Ok(info.clone());
|
||||
}
|
||||
|
||||
let n_validators =
|
||||
request_validators(relay_parent, &mut sender.clone()).await.await??.len();
|
||||
|
||||
let info = PerSessionInfo { n_validators };
|
||||
self.0.insert(session_index, info);
|
||||
Ok(self.0.get(&session_index).expect("Just inserted").clone())
|
||||
}
|
||||
}
|
||||
|
||||
/// A collation together with all contextual data needed to turn it into a
/// candidate receipt and distribute it.
struct PreparedCollation {
	// The collator's output (head data, PoV, messages, ...).
	collation: Collation,
	// Para the collation is for.
	para_id: ParaId,
	// Relay-chain block the collation is built on.
	relay_parent: Hash,
	// Persisted validation data with `parent_head` already set to the intended parent.
	validation_data: PersistedValidationData,
	// Hash of the validation code the candidate must be checked against.
	validation_code_hash: ValidationCodeHash,
	// Validator-set size of the session; determines the erasure chunk count.
	n_validators: usize,
	// Core the candidate's descriptor commits to.
	core_index: CoreIndex,
	// Session the candidate's descriptor commits to.
	session_index: SessionIndex,
}
|
||||
|
||||
/// Takes a prepared collation, along with its context, and produces a candidate receipt
/// which is distributed to validators.
///
/// Steps: compress the PoV (rejecting it if it exceeds `max_pov_size`), compute the
/// erasure root over the available data, assemble the V2 committed candidate receipt,
/// validate its UMP signals against the claim queue, then hand the plain receipt to
/// the collator protocol via `DistributeCollation`.
async fn construct_and_distribute_receipt(
	collation: PreparedCollation,
	sender: &mut impl overseer::CollationGenerationSenderTrait,
	result_sender: Option<oneshot::Sender<CollationSecondedSignal>>,
	metrics: &Metrics,
	transposed_claim_queue: &TransposedClaimQueue,
) -> Result<()> {
	let PreparedCollation {
		collation,
		para_id,
		relay_parent,
		validation_data,
		validation_code_hash,
		n_validators,
		core_index,
		session_index,
	} = collation;

	let persisted_validation_data_hash = validation_data.hash();
	let parent_head_data = validation_data.parent_head.clone();
	let parent_head_data_hash = validation_data.parent_head.hash();

	// Apply compression to the block data.
	let pov = {
		let pov = collation.proof_of_validity.into_compressed();
		let encoded_size = pov.encoded_size();

		// As long as `POV_BOMB_LIMIT` is at least `max_pov_size`, this ensures
		// that honest collators never produce a PoV which is uncompressed.
		//
		// As such, honest collators never produce an uncompressed PoV which starts with
		// a compression magic number, which would lead validators to reject the collation.
		if encoded_size > validation_data.max_pov_size as usize {
			return Err(Error::POVSizeExceeded(encoded_size, validation_data.max_pov_size as usize));
		}

		pov
	};

	let pov_hash = pov.hash();

	// NOTE: `validation_data` is consumed here; everything needed from it was
	// extracted (hash, parent head) above.
	let erasure_root = erasure_root(n_validators, validation_data, pov.clone())?;

	let commitments = CandidateCommitments {
		upward_messages: collation.upward_messages,
		horizontal_messages: collation.horizontal_messages,
		new_validation_code: collation.new_validation_code,
		head_data: collation.head_data,
		processed_downward_messages: collation.processed_downward_messages,
		hrmp_watermark: collation.hrmp_watermark,
	};

	let receipt = {
		let ccr = CommittedCandidateReceiptV2 {
			descriptor: CandidateDescriptorV2::new(
				para_id,
				relay_parent,
				core_index,
				session_index,
				persisted_validation_data_hash,
				pov_hash,
				erasure_root,
				commitments.head_data.hash(),
				validation_code_hash,
			),
			commitments: commitments.clone(),
		};

		// Reject the candidate here rather than letting validators reject it later.
		ccr.parse_ump_signals(&transposed_claim_queue)
			.map_err(Error::CandidateReceiptCheck)?;

		ccr.to_plain()
	};

	gum::debug!(
		target: LOG_TARGET,
		candidate_hash = ?receipt.hash(),
		?pov_hash,
		?relay_parent,
		para_id = %para_id,
		?core_index,
		"Candidate generated",
	);
	gum::trace!(
		target: LOG_TARGET,
		?commitments,
		candidate_hash = ?receipt.hash(),
		"Candidate commitments",
	);

	metrics.on_collation_generated();

	sender
		.send_message(CollatorProtocolMessage::DistributeCollation {
			candidate_receipt: receipt,
			parent_head_data_hash,
			pov,
			parent_head_data,
			result_sender,
			core_index,
		})
		.await;

	Ok(())
}
|
||||
|
||||
fn erasure_root(
|
||||
n_validators: usize,
|
||||
persisted_validation: PersistedValidationData,
|
||||
pov: PoV,
|
||||
) -> Result<Hash> {
|
||||
let available_data =
|
||||
AvailableData { validation_data: persisted_validation, pov: Arc::new(pov) };
|
||||
|
||||
let chunks = pezkuwi_erasure_coding::obtain_chunks_v1(n_validators, &available_data)?;
|
||||
Ok(pezkuwi_erasure_coding::branches(&chunks).root())
|
||||
}
|
||||
@@ -0,0 +1,75 @@
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// This file is part of Pezkuwi.
|
||||
|
||||
// Pezkuwi is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Pezkuwi is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use pezkuwi_node_subsystem_util::metrics::{self, prometheus};
|
||||
|
||||
// The concrete Prometheus handles backing [`Metrics`].
#[derive(Clone)]
pub(crate) struct MetricsInner {
	// Total number of collations generated by this node.
	pub(crate) collations_generated_total: prometheus::Counter<prometheus::U64>,
	// Time spent inside `handle_new_activation`.
	pub(crate) new_activation: prometheus::Histogram,
	// Time spent inside `handle_submit_collation`.
	pub(crate) submit_collation: prometheus::Histogram,
}
|
||||
|
||||
/// `CollationGenerationSubsystem` metrics.
///
/// `None` when no Prometheus registry was provided (e.g. in tests); every
/// method is then a no-op.
#[derive(Default, Clone)]
pub struct Metrics(pub(crate) Option<MetricsInner>);
|
||||
|
||||
impl Metrics {
|
||||
pub fn on_collation_generated(&self) {
|
||||
if let Some(metrics) = &self.0 {
|
||||
metrics.collations_generated_total.inc();
|
||||
}
|
||||
}
|
||||
|
||||
/// Provide a timer for new activations which updates on drop.
|
||||
pub fn time_new_activation(&self) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
|
||||
self.0.as_ref().map(|metrics| metrics.new_activation.start_timer())
|
||||
}
|
||||
|
||||
/// Provide a timer for submitting a collation which updates on drop.
|
||||
pub fn time_submit_collation(&self) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
|
||||
self.0.as_ref().map(|metrics| metrics.submit_collation.start_timer())
|
||||
}
|
||||
}
|
||||
|
||||
impl metrics::Metrics for Metrics {
	// Register all subsystem metrics with the given registry. Any registration
	// failure aborts the whole set and is surfaced to the caller.
	fn try_register(registry: &prometheus::Registry) -> Result<Self, prometheus::PrometheusError> {
		let metrics = MetricsInner {
			collations_generated_total: prometheus::register(
				prometheus::Counter::new(
					"pezkuwi_teyrchain_collations_generated_total",
					"Number of collations generated.",
				)?,
				registry,
			)?,
			new_activation: prometheus::register(
				prometheus::Histogram::with_opts(prometheus::HistogramOpts::new(
					"pezkuwi_teyrchain_collation_generation_new_activations",
					"Time spent within fn handle_new_activation",
				))?,
				registry,
			)?,
			submit_collation: prometheus::register(
				prometheus::Histogram::with_opts(prometheus::HistogramOpts::new(
					"pezkuwi_teyrchain_collation_generation_submit_collation",
					"Time spent preparing and submitting a collation to the network protocol",
				))?,
				registry,
			)?,
		};
		Ok(Metrics(Some(metrics)))
	}
}
|
||||
@@ -0,0 +1,748 @@
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// This file is part of Pezkuwi.
|
||||
|
||||
// Pezkuwi is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Pezkuwi is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use super::*;
|
||||
use assert_matches::assert_matches;
|
||||
use futures::{self, Future, StreamExt};
|
||||
use pezkuwi_node_primitives::{
|
||||
BlockData, Collation, CollationResult, CollatorFn, MaybeCompressedPoV, PoV,
|
||||
};
|
||||
use pezkuwi_node_subsystem::{
|
||||
messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest},
|
||||
ActivatedLeaf,
|
||||
};
|
||||
use pezkuwi_node_subsystem_test_helpers::TestSubsystemContextHandle;
|
||||
use pezkuwi_node_subsystem_util::TimeoutExt;
|
||||
use pezkuwi_primitives::{
|
||||
CandidateDescriptorVersion, CandidateReceiptV2, ClaimQueueOffset, CollatorPair, CoreSelector,
|
||||
PersistedValidationData, UMPSignal, UMP_SEPARATOR,
|
||||
};
|
||||
use pezkuwi_primitives_test_helpers::dummy_head_data;
|
||||
use rstest::rstest;
|
||||
use sp_core::Pair;
|
||||
use sp_keyring::sr25519::Keyring as Sr25519Keyring;
|
||||
use std::{
|
||||
collections::{BTreeMap, VecDeque},
|
||||
sync::Mutex,
|
||||
};
|
||||
|
||||
type VirtualOverseer = TestSubsystemContextHandle<CollationGenerationMessage>;
|
||||
|
||||
/// Run `test` against a freshly-spawned `CollationGenerationSubsystem`.
///
/// The closure receives a virtual overseer handle to drive the subsystem; it
/// must return that handle so the harness can verify no unhandled messages
/// remain and then conclude the subsystem.
fn test_harness<T: Future<Output = VirtualOverseer>>(test: impl FnOnce(VirtualOverseer) -> T) {
	let pool = sp_core::testing::TaskExecutor::new();
	let (context, virtual_overseer) =
		pezkuwi_node_subsystem_test_helpers::make_subsystem_context(pool);
	let subsystem = async move {
		let subsystem = crate::CollationGenerationSubsystem::new(Metrics::default());

		subsystem.run(context).await;
	};

	let test_fut = test(virtual_overseer);

	futures::pin_mut!(test_fut);
	// Drive the test and the subsystem concurrently until both complete.
	futures::executor::block_on(futures::future::join(
		async move {
			let mut virtual_overseer = test_fut.await;
			// Ensure we have handled all responses.
			if let Some(msg) = virtual_overseer.rx.next().timeout(TIMEOUT).await {
				panic!("Did not handle all responses: {:?}", msg);
			}
			// Conclude.
			virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
		},
		subsystem,
	));
}
|
||||
|
||||
/// Build a minimal `Collation` (empty messages, empty raw PoV, dummy head data)
/// for use as a fixture in tests.
fn test_collation() -> Collation {
	Collation {
		upward_messages: Default::default(),
		horizontal_messages: Default::default(),
		new_validation_code: None,
		head_data: dummy_head_data(),
		proof_of_validity: MaybeCompressedPoV::Raw(PoV { block_data: BlockData(Vec::new()) }),
		processed_downward_messages: 0_u32,
		hrmp_watermark: 0_u32.into(),
	}
}
|
||||
|
||||
// Parameters controlling the `SelectCore` UMP signal a test collator emits.
struct CoreSelectorData {
	// The core selector index.
	index: u8,
	// The increment value for the core selector index. Normally 1, but can be set to 0 or another
	// value for testing scenarios where a teyrchain repeatedly selects the same core index.
	increment_index_by: u8,
	// The claim queue offset.
	cq_offset: u8,
}
|
||||
|
||||
impl CoreSelectorData {
	// Create a new set of core-selector parameters for a test collator.
	fn new(index: u8, increment_index_by: u8, cq_offset: u8) -> Self {
		Self { index, increment_index_by, cq_offset }
	}
}
|
||||
|
||||
// Mutable state shared between a test and the collator closure it produces.
struct State {
	// When `Some`, produced collations embed a `SelectCore` UMP signal built
	// from these parameters.
	core_selector_data: Option<CoreSelectorData>,
}

impl State {
	// Wrap the optional core-selector parameters.
	fn new(core_selector_data: Option<CoreSelectorData>) -> Self {
		Self { core_selector_data }
	}
}
|
||||
|
||||
// A test collator whose collations optionally carry `SelectCore` UMP signals.
struct TestCollator {
	// Shared mutable state: the produced closure and the test both hold an `Arc`.
	state: Arc<Mutex<State>>,
}

impl TestCollator {
	// When `core_selector_data` is `Some`, every collation produced by the
	// collation function appends a `SelectCore` signal built from it.
	fn new(core_selector_data: Option<CoreSelectorData>) -> Self {
		Self { state: Arc::new(Mutex::new(State::new(core_selector_data))) }
	}

	// Build the boxed `CollatorFn` handed to the subsystem's config.
	pub fn create_collation_function(&self) -> CollatorFn {
		let state = Arc::clone(&self.state);

		Box::new(move |_relay_parent: Hash, _validation_data: &PersistedValidationData| {
			let mut collation = test_collation();
			let mut state_guard = state.lock().unwrap();

			if let Some(core_selector_data) = &mut state_guard.core_selector_data {
				// UMP signals follow the separator in the upward-message queue.
				collation.upward_messages.force_push(UMP_SEPARATOR);
				collation.upward_messages.force_push(
					UMPSignal::SelectCore(
						CoreSelector(core_selector_data.index),
						ClaimQueueOffset(core_selector_data.cq_offset),
					)
					.encode(),
				);
				// Advance the selector index for the next collation
				// (increment may be 0 to simulate a repeated selection).
				core_selector_data.index += core_selector_data.increment_index_by;
			}

			async move { Some(CollationResult { collation, result_sender: None }) }.boxed()
		})
	}
}
|
||||
|
||||
// Maximum time the test harness waits for a single overseer message.
const TIMEOUT: std::time::Duration = std::time::Duration::from_millis(2000);
||||
|
||||
async fn overseer_recv(overseer: &mut VirtualOverseer) -> AllMessages {
|
||||
overseer
|
||||
.recv()
|
||||
.timeout(TIMEOUT)
|
||||
.await
|
||||
.expect(&format!("{:?} is long enough to receive messages", TIMEOUT))
|
||||
}
|
||||
|
||||
/// Build a `CollationGenerationConfig` whose collator is a fresh [`TestCollator`]
/// parameterized by `core_selector_data`.
fn test_config<Id: Into<ParaId>>(
	para_id: Id,
	core_selector_data: Option<CoreSelectorData>,
) -> CollationGenerationConfig {
	let test_collator = TestCollator::new(core_selector_data);
	CollationGenerationConfig {
		key: CollatorPair::generate().0,
		collator: Some(test_collator.create_collation_function()),
		para_id: para_id.into(),
	}
}
|
||||
|
||||
fn test_config_no_collator<Id: Into<ParaId>>(para_id: Id) -> CollationGenerationConfig {
|
||||
CollationGenerationConfig {
|
||||
key: CollatorPair::generate().0,
|
||||
collator: None,
|
||||
para_id: para_id.into(),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
fn submit_collation_is_no_op_before_initialization() {
    test_harness(|mut virtual_overseer| async move {
        // Submit a collation without ever sending `Initialize`: the subsystem
        // must ignore the request (no follow-up messages, no panic).
        let params = SubmitCollationParams {
            relay_parent: Hash::repeat_byte(0),
            collation: test_collation(),
            parent_head: vec![1, 2, 3].into(),
            validation_code_hash: Hash::repeat_byte(1).into(),
            result_sender: None,
            core_index: CoreIndex(0),
        };
        virtual_overseer
            .send(FromOrchestra::Communication {
                msg: CollationGenerationMessage::SubmitCollation(params),
            })
            .await;

        virtual_overseer
    });
}
|
||||
|
||||
// After `Initialize`, a `SubmitCollation` request must result in a
// `DistributeCollation` message whose receipt commits to the runtime-provided
// persisted validation data, head data and validation code.
#[test]
fn submit_collation_leads_to_distribution() {
    let relay_parent = Hash::repeat_byte(0);
    let validation_code_hash = ValidationCodeHash::from(Hash::repeat_byte(42));
    let parent_head = dummy_head_data();
    let para_id = ParaId::from(5);
    // PVD the mocked runtime answers with; the distributed candidate must
    // commit to its hash.
    let expected_pvd = PersistedValidationData {
        parent_head: parent_head.clone(),
        relay_parent_number: 10,
        relay_parent_storage_root: Hash::repeat_byte(1),
        max_pov_size: 1024,
    };

    test_harness(|mut virtual_overseer| async move {
        // Initialize without a collation function: collations come only from
        // explicit `SubmitCollation` requests.
        virtual_overseer
            .send(FromOrchestra::Communication {
                msg: CollationGenerationMessage::Initialize(test_config_no_collator(para_id)),
            })
            .await;

        // Submit a collation for core 0.
        virtual_overseer
            .send(FromOrchestra::Communication {
                msg: CollationGenerationMessage::SubmitCollation(SubmitCollationParams {
                    relay_parent,
                    collation: test_collation(),
                    parent_head: dummy_head_data(),
                    validation_code_hash,
                    result_sender: None,
                    core_index: CoreIndex(0),
                }),
            })
            .await;

        // Answer the runtime API requests triggered by the submission; the
        // claim queue assigns the para to core 0, matching the submitted
        // `core_index`.
        helpers::handle_runtime_calls_on_submit_collation(
            &mut virtual_overseer,
            relay_parent,
            para_id,
            expected_pvd.clone(),
            [(CoreIndex(0), VecDeque::from([para_id]))].into(),
        )
        .await;

        // The collation must be distributed with the expected commitments.
        assert_matches!(
            overseer_recv(&mut virtual_overseer).await,
            AllMessages::CollatorProtocol(CollatorProtocolMessage::DistributeCollation {
                candidate_receipt,
                parent_head_data_hash,
                ..
            }) => {
                let CandidateReceiptV2 { descriptor, .. } = candidate_receipt;
                assert_eq!(parent_head_data_hash, parent_head.hash());
                assert_eq!(descriptor.persisted_validation_data_hash(), expected_pvd.hash());
                assert_eq!(descriptor.para_head(), dummy_head_data().hash());
                assert_eq!(descriptor.validation_code_hash(), validation_code_hash);
            }
        );

        virtual_overseer
    });
}
|
||||
|
||||
// With the para assigned (at claim queue offset 0) to a single core, only that
// core gets a collation on new head activation.
#[test]
fn distribute_collation_only_for_assigned_para_id_at_offset_0() {
    let activated_hash: Hash = [1; 32].into();
    let para_id = ParaId::from(5);

    // Ranges are already iterators; the redundant `.into_iter()` was removed
    // (clippy::useless_conversion).
    let claim_queue = (0..=5)
        // Set all cores assigned to para_id 5 at the second and third depths. This shouldn't
        // matter.
        .map(|idx| (CoreIndex(idx), VecDeque::from([ParaId::from(idx), para_id, para_id])))
        .collect::<BTreeMap<_, _>>();

    test_harness(|mut virtual_overseer| async move {
        helpers::initialize_collator(&mut virtual_overseer, para_id, None).await;
        helpers::activate_new_head(&mut virtual_overseer, activated_hash).await;
        helpers::handle_runtime_calls_on_new_head_activation(
            &mut virtual_overseer,
            activated_hash,
            claim_queue,
        )
        .await;

        // At offset 0 only core 5 has `para_id` first in its claim queue.
        helpers::handle_cores_processing_for_a_leaf(
            &mut virtual_overseer,
            activated_hash,
            para_id,
            vec![5], // Only core 5 is assigned to paraid 5.
        )
        .await;

        virtual_overseer
    });
}
|
||||
|
||||
// There are variable number of cores assigned to the paraid.
|
||||
// On new head activation `CollationGeneration` should produce and distribute the right number of
|
||||
// new collations with proper assumption about the para candidate chain availability at next block.
|
||||
#[rstest]
|
||||
#[case(0)]
|
||||
#[case(1)]
|
||||
#[case(2)]
|
||||
#[case(3)]
|
||||
fn distribute_collation_with_elastic_scaling(#[case] total_cores: u32) {
|
||||
let activated_hash: Hash = [1; 32].into();
|
||||
let para_id = ParaId::from(5);
|
||||
|
||||
let claim_queue = (0..total_cores)
|
||||
.into_iter()
|
||||
.map(|idx| (CoreIndex(idx), VecDeque::from([para_id])))
|
||||
.collect::<BTreeMap<_, _>>();
|
||||
|
||||
test_harness(|mut virtual_overseer| async move {
|
||||
helpers::initialize_collator(&mut virtual_overseer, para_id, None).await;
|
||||
helpers::activate_new_head(&mut virtual_overseer, activated_hash).await;
|
||||
helpers::handle_runtime_calls_on_new_head_activation(
|
||||
&mut virtual_overseer,
|
||||
activated_hash,
|
||||
claim_queue,
|
||||
)
|
||||
.await;
|
||||
|
||||
helpers::handle_cores_processing_for_a_leaf(
|
||||
&mut virtual_overseer,
|
||||
activated_hash,
|
||||
para_id,
|
||||
(0..total_cores).collect(),
|
||||
)
|
||||
.await;
|
||||
|
||||
virtual_overseer
|
||||
});
|
||||
}
|
||||
|
||||
// Tests when submission core indexes need to be selected using the core selectors provided in the
|
||||
// UMP signals. The core selector index is an increasing number that can start with a non-negative
|
||||
// value (even greater than the core index), but the collation generation protocol uses the
|
||||
// remainder to select the core. UMP signals may also contain a claim queue offset, based on which
|
||||
// we need to select the assigned core indexes for the para from that offset in the claim queue.
|
||||
#[rstest]
|
||||
#[case(1, 0, 0)]
|
||||
#[case(2, 0, 1)]
|
||||
fn distribute_collation_with_core_selectors(
|
||||
#[case] total_cores: u32,
|
||||
// The core selector index that will be obtained from the first collation.
|
||||
#[case] init_cs_index: u8,
|
||||
// Claim queue offset where the assigned cores will be stored.
|
||||
#[case] cq_offset: u8,
|
||||
) {
|
||||
let activated_hash: Hash = [1; 32].into();
|
||||
let para_id = ParaId::from(5);
|
||||
let other_para_id = ParaId::from(10);
|
||||
|
||||
let claim_queue = (0..total_cores)
|
||||
.into_iter()
|
||||
.map(|idx| {
|
||||
// Set all cores assigned to para_id 5 at the cq_offset depth.
|
||||
let mut vec = VecDeque::from(vec![other_para_id; cq_offset as usize]);
|
||||
vec.push_back(para_id);
|
||||
(CoreIndex(idx), vec)
|
||||
})
|
||||
.collect::<BTreeMap<_, _>>();
|
||||
|
||||
test_harness(|mut virtual_overseer| async move {
|
||||
helpers::initialize_collator(
|
||||
&mut virtual_overseer,
|
||||
para_id,
|
||||
Some(CoreSelectorData::new(init_cs_index, 1, cq_offset)),
|
||||
)
|
||||
.await;
|
||||
helpers::activate_new_head(&mut virtual_overseer, activated_hash).await;
|
||||
helpers::handle_runtime_calls_on_new_head_activation(
|
||||
&mut virtual_overseer,
|
||||
activated_hash,
|
||||
claim_queue,
|
||||
)
|
||||
.await;
|
||||
|
||||
let mut cores_assigned = (0..total_cores).collect::<Vec<_>>();
|
||||
if total_cores > 1 && init_cs_index > 0 {
|
||||
// We need to rotate the list of cores because the first core selector index was
|
||||
// non-zero, which should change the sequence of submissions. However, collations should
|
||||
// still be submitted on all cores.
|
||||
cores_assigned.rotate_left((init_cs_index as u32 % total_cores) as usize);
|
||||
}
|
||||
helpers::handle_cores_processing_for_a_leaf(
|
||||
&mut virtual_overseer,
|
||||
activated_hash,
|
||||
para_id,
|
||||
cores_assigned,
|
||||
)
|
||||
.await;
|
||||
|
||||
virtual_overseer
|
||||
});
|
||||
}
|
||||
|
||||
// Tests the behavior when a teyrchain repeatedly selects the same core index.
|
||||
// Ensures that the system handles this behavior correctly while maintaining expected functionality.
|
||||
#[rstest]
|
||||
#[case(3, 0, vec![0])]
|
||||
#[case(3, 1, vec![0, 1, 2])]
|
||||
#[case(3, 2, vec![0, 2, 1])]
|
||||
#[case(3, 3, vec![0])]
|
||||
#[case(3, 4, vec![0, 1, 2])]
|
||||
fn distribute_collation_with_repeated_core_selector_index(
|
||||
#[case] total_cores: u32,
|
||||
#[case] increment_cs_index_by: u8,
|
||||
#[case] expected_selected_cores: Vec<u32>,
|
||||
) {
|
||||
let activated_hash: Hash = [1; 32].into();
|
||||
let para_id = ParaId::from(5);
|
||||
|
||||
let claim_queue = (0..total_cores)
|
||||
.into_iter()
|
||||
.map(|idx| (CoreIndex(idx), VecDeque::from([para_id])))
|
||||
.collect::<BTreeMap<_, _>>();
|
||||
|
||||
test_harness(|mut virtual_overseer| async move {
|
||||
helpers::initialize_collator(
|
||||
&mut virtual_overseer,
|
||||
para_id,
|
||||
Some(CoreSelectorData::new(0, increment_cs_index_by, 0)),
|
||||
)
|
||||
.await;
|
||||
helpers::activate_new_head(&mut virtual_overseer, activated_hash).await;
|
||||
helpers::handle_runtime_calls_on_new_head_activation(
|
||||
&mut virtual_overseer,
|
||||
activated_hash,
|
||||
claim_queue,
|
||||
)
|
||||
.await;
|
||||
|
||||
helpers::handle_cores_processing_for_a_leaf(
|
||||
&mut virtual_overseer,
|
||||
activated_hash,
|
||||
para_id,
|
||||
expected_selected_cores,
|
||||
)
|
||||
.await;
|
||||
|
||||
virtual_overseer
|
||||
});
|
||||
}
|
||||
|
||||
// A submitted collation whose core index commitment does not match any claim
// queue assignment for the para must NOT be distributed: here the para is only
// assigned to core 1 while the submission targets core 0.
#[test]
fn v2_receipts_failed_core_index_check() {
    let relay_parent = Hash::repeat_byte(0);
    let validation_code_hash = ValidationCodeHash::from(Hash::repeat_byte(42));
    let parent_head = dummy_head_data();
    let para_id = ParaId::from(5);
    let expected_pvd = PersistedValidationData {
        parent_head: parent_head.clone(),
        relay_parent_number: 10,
        relay_parent_storage_root: Hash::repeat_byte(1),
        max_pov_size: 1024,
    };

    test_harness(|mut virtual_overseer| async move {
        virtual_overseer
            .send(FromOrchestra::Communication {
                msg: CollationGenerationMessage::Initialize(test_config_no_collator(para_id)),
            })
            .await;

        // Submit a collation targeting core 0.
        virtual_overseer
            .send(FromOrchestra::Communication {
                msg: CollationGenerationMessage::SubmitCollation(SubmitCollationParams {
                    relay_parent,
                    collation: test_collation(),
                    parent_head: dummy_head_data(),
                    validation_code_hash,
                    result_sender: None,
                    core_index: CoreIndex(0),
                }),
            })
            .await;

        helpers::handle_runtime_calls_on_submit_collation(
            &mut virtual_overseer,
            relay_parent,
            para_id,
            expected_pvd.clone(),
            // Core index commitment is on core 0 but don't add any assignment for core 0.
            [(CoreIndex(1), [para_id].into_iter().collect())].into_iter().collect(),
        )
        .await;

        // No collation is distributed.

        virtual_overseer
    });
}
|
||||
|
||||
#[test]
// Verify that an ApprovedPeer UMP signal does not break the subsystem (DistributeCollation is
// sent), assuming CandidateReceiptV2 node feature is enabled.
fn approved_peer_signal() {
    let relay_parent = Hash::repeat_byte(0);
    let validation_code_hash = ValidationCodeHash::from(Hash::repeat_byte(42));
    let parent_head = dummy_head_data();
    let para_id = ParaId::from(5);
    // PVD the mocked runtime answers with; the distributed candidate must
    // commit to its hash.
    let expected_pvd = PersistedValidationData {
        parent_head: parent_head.clone(),
        relay_parent_number: 10,
        relay_parent_storage_root: Hash::repeat_byte(1),
        max_pov_size: 1024,
    };

    test_harness(|mut virtual_overseer| async move {
        virtual_overseer
            .send(FromOrchestra::Communication {
                msg: CollationGenerationMessage::Initialize(test_config_no_collator(para_id)),
            })
            .await;

        // Append an `ApprovedPeer` UMP signal (after the separator) to the
        // collation before submitting it.
        let mut collation = test_collation();
        collation.upward_messages.force_push(UMP_SEPARATOR);
        collation
            .upward_messages
            .force_push(UMPSignal::ApprovedPeer(vec![1, 2, 3, 4, 5].try_into().unwrap()).encode());

        virtual_overseer
            .send(FromOrchestra::Communication {
                msg: CollationGenerationMessage::SubmitCollation(SubmitCollationParams {
                    relay_parent,
                    collation,
                    parent_head: dummy_head_data(),
                    validation_code_hash,
                    result_sender: None,
                    core_index: CoreIndex(0),
                }),
            })
            .await;

        helpers::handle_runtime_calls_on_submit_collation(
            &mut virtual_overseer,
            relay_parent,
            para_id,
            expected_pvd.clone(),
            [(CoreIndex(0), [para_id].into_iter().collect())].into_iter().collect(),
        )
        .await;

        // Despite the extra signal, the collation is distributed with the
        // expected commitments and a V2 descriptor.
        assert_matches!(
            overseer_recv(&mut virtual_overseer).await,
            AllMessages::CollatorProtocol(CollatorProtocolMessage::DistributeCollation {
                candidate_receipt,
                parent_head_data_hash,
                ..
            }) => {
                let CandidateReceiptV2 { descriptor, .. } = candidate_receipt;
                assert_eq!(parent_head_data_hash, parent_head.hash());
                assert_eq!(descriptor.persisted_validation_data_hash(), expected_pvd.hash());
                assert_eq!(descriptor.para_head(), dummy_head_data().hash());
                assert_eq!(descriptor.validation_code_hash(), validation_code_hash);
                assert_eq!(descriptor.version(), CandidateDescriptorVersion::V2);
            }
        );

        virtual_overseer
    });
}
|
||||
|
||||
/// Helpers that drive the virtual overseer: sending messages into the
/// subsystem and answering the runtime API requests it is expected to make, in
/// the exact order the subsystem issues them.
mod helpers {
    use super::*;
    use std::collections::{BTreeMap, VecDeque};

    // Sends `Initialize` with a collator config built by `test_config` (i.e.
    // with a working `TestCollator`).
    pub async fn initialize_collator(
        virtual_overseer: &mut VirtualOverseer,
        para_id: ParaId,
        core_selector_data: Option<CoreSelectorData>,
    ) {
        virtual_overseer
            .send(FromOrchestra::Communication {
                msg: CollationGenerationMessage::Initialize(test_config(
                    para_id,
                    core_selector_data,
                )),
            })
            .await;
    }

    // Sends `ActiveLeaves` for a single leaf with the specified hash. Block number is hardcoded.
    pub async fn activate_new_head(virtual_overseer: &mut VirtualOverseer, activated_hash: Hash) {
        virtual_overseer
            .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate {
                activated: Some(ActivatedLeaf {
                    hash: activated_hash,
                    number: 10,
                    unpin_handle: pezkuwi_node_subsystem_test_helpers::mock::dummy_unpin_handle(
                        activated_hash,
                    ),
                }),
                ..Default::default()
            })))
            .await;
    }

    // Handle all runtime calls performed in `handle_new_activation`, in order:
    // session index, validator set, then the claim queue provided by the test.
    pub async fn handle_runtime_calls_on_new_head_activation(
        virtual_overseer: &mut VirtualOverseer,
        activated_hash: Hash,
        claim_queue: BTreeMap<CoreIndex, VecDeque<ParaId>>,
    ) {
        assert_matches!(
            overseer_recv(virtual_overseer).await,
            AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, RuntimeApiRequest::SessionIndexForChild(tx))) => {
                assert_eq!(hash, activated_hash);
                tx.send(Ok(1)).unwrap();
            }
        );

        assert_matches!(
            overseer_recv(virtual_overseer).await,
            AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, RuntimeApiRequest::Validators(tx))) => {
                assert_eq!(hash, activated_hash);
                tx.send(Ok(vec![
                    Sr25519Keyring::Alice.public().into(),
                    Sr25519Keyring::Bob.public().into(),
                    Sr25519Keyring::Charlie.public().into(),
                ])).unwrap();
            }
        );

        assert_matches!(
            overseer_recv(virtual_overseer).await,
            AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, RuntimeApiRequest::ClaimQueue(tx))) => {
                assert_eq!(hash, activated_hash);
                tx.send(Ok(claim_queue)).unwrap();
            }
        );
    }

    // Handles all runtime requests performed in `handle_new_activation` for the case when a
    // collation should be prepared for the new leaf, then asserts one
    // `DistributeCollation` per entry of `cores_assigned` (in that order).
    pub async fn handle_cores_processing_for_a_leaf(
        virtual_overseer: &mut VirtualOverseer,
        activated_hash: Hash,
        para_id: ParaId,
        cores_assigned: Vec<u32>,
    ) {
        // Expect no messages if no cores are assigned to the para.
        if cores_assigned.is_empty() {
            return;
        }

        // Some hardcoded data - if needed, extract to parameters
        let validation_code_hash = ValidationCodeHash::from(Hash::repeat_byte(42));
        let parent_head = dummy_head_data();
        let pvd = PersistedValidationData {
            parent_head: parent_head.clone(),
            relay_parent_number: 10,
            relay_parent_storage_root: Hash::repeat_byte(1),
            max_pov_size: 1024,
        };

        assert_matches!(
            overseer_recv(virtual_overseer).await,
            AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, RuntimeApiRequest::PersistedValidationData(id, a, tx))) => {
                assert_eq!(hash, activated_hash);
                assert_eq!(id, para_id);
                assert_eq!(a, OccupiedCoreAssumption::Included);

                let _ = tx.send(Ok(Some(pvd.clone())));
            }
        );

        assert_matches!(
            overseer_recv(virtual_overseer).await,
            AllMessages::RuntimeApi(RuntimeApiMessage::Request(
                hash,
                RuntimeApiRequest::ValidationCodeHash(
                    id,
                    assumption,
                    tx,
                ),
            )) => {
                assert_eq!(hash, activated_hash);
                assert_eq!(id, para_id);
                assert_eq!(assumption, OccupiedCoreAssumption::Included);

                let _ = tx.send(Ok(Some(validation_code_hash)));
            }
        );

        // One distribution per assigned core, each committing to the mocked
        // PVD, head data and validation code above.
        for core in cores_assigned {
            assert_matches!(
                overseer_recv(virtual_overseer).await,
                AllMessages::CollatorProtocol(CollatorProtocolMessage::DistributeCollation{
                    candidate_receipt,
                    parent_head_data_hash,
                    core_index,
                    ..
                }) => {
                    assert_eq!(CoreIndex(core), core_index);
                    assert_eq!(parent_head_data_hash, parent_head.hash());
                    assert_eq!(candidate_receipt.descriptor().persisted_validation_data_hash(), pvd.hash());
                    assert_eq!(candidate_receipt.descriptor().para_head(), dummy_head_data().hash());
                    assert_eq!(candidate_receipt.descriptor().validation_code_hash(), validation_code_hash);
                }
            );
        }
    }

    // Handles all runtime requests performed in `handle_submit_collation`, in
    // order: PVD (with the `TimedOut` assumption), claim queue, session index,
    // validator set.
    pub async fn handle_runtime_calls_on_submit_collation(
        virtual_overseer: &mut VirtualOverseer,
        relay_parent: Hash,
        para_id: ParaId,
        expected_pvd: PersistedValidationData,
        claim_queue: BTreeMap<CoreIndex, VecDeque<ParaId>>,
    ) {
        assert_matches!(
            overseer_recv(virtual_overseer).await,
            AllMessages::RuntimeApi(RuntimeApiMessage::Request(rp, RuntimeApiRequest::PersistedValidationData(id, a, tx))) => {
                assert_eq!(rp, relay_parent);
                assert_eq!(id, para_id);
                assert_eq!(a, OccupiedCoreAssumption::TimedOut);

                tx.send(Ok(Some(expected_pvd))).unwrap();
            }
        );

        assert_matches!(
            overseer_recv(virtual_overseer).await,
            AllMessages::RuntimeApi(RuntimeApiMessage::Request(
                rp,
                RuntimeApiRequest::ClaimQueue(tx),
            )) => {
                assert_eq!(rp, relay_parent);
                tx.send(Ok(claim_queue)).unwrap();
            }
        );

        assert_matches!(
            overseer_recv(virtual_overseer).await,
            AllMessages::RuntimeApi(RuntimeApiMessage::Request(rp, RuntimeApiRequest::SessionIndexForChild(tx))) => {
                assert_eq!(rp, relay_parent);
                tx.send(Ok(1)).unwrap();
            }
        );

        assert_matches!(
            overseer_recv(virtual_overseer).await,
            AllMessages::RuntimeApi(RuntimeApiMessage::Request(rp, RuntimeApiRequest::Validators(tx))) => {
                assert_eq!(rp, relay_parent);
                tx.send(Ok(vec![
                    Sr25519Keyring::Alice.public().into(),
                    Sr25519Keyring::Bob.public().into(),
                    Sr25519Keyring::Charlie.public().into(),
                ])).unwrap();
            }
        );
    }
}
|
||||
Reference in New Issue
Block a user