Introduce CollectCollationInfo runtime api (#443)

* Introduce `CollectCollationInfo` runtime api

Instead of using well known keys to communicate information about a
collation between the runtime and the collator, we now use a runtime api
for this.

* Fixes bug

* Apply suggestions from code review

Co-authored-by: Sergei Shulepov <sergei@parity.io>

* Doc update

Co-authored-by: Sergei Shulepov <sergei@parity.io>
This commit is contained in:
Bastian Köcher
2021-05-17 16:33:33 +02:00
committed by GitHub
parent 3bcd7f0fd8
commit d458d2622b
14 changed files with 218 additions and 279 deletions
+2
View File
@@ -1283,6 +1283,7 @@ dependencies = [
"polkadot-overseer",
"polkadot-primitives",
"sc-client-api",
"sp-api",
"sp-blockchain",
"sp-consensus",
"sp-core",
@@ -1565,6 +1566,7 @@ dependencies = [
"polkadot-core-primitives",
"polkadot-parachain",
"polkadot-primitives",
"sp-api",
"sp-runtime",
"sp-std",
"sp-trie",
+1 -1
View File
@@ -10,7 +10,7 @@ sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master
sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
# Polkadot dependencies
+44 -113
View File
@@ -17,18 +17,16 @@
//! Cumulus Collator implementation for Substrate.
use cumulus_client_network::WaitToAnnounce;
use cumulus_primitives_core::{
well_known_keys, OutboundHrmpMessage, ParachainBlockData, PersistedValidationData,
};
use cumulus_primitives_core::{CollectCollationInfo, ParachainBlockData, PersistedValidationData};
use sc_client_api::BlockBackend;
use sp_api::ProvideRuntimeApi;
use sp_consensus::BlockStatus;
use sp_core::traits::SpawnNamed;
use sp_runtime::{
generic::BlockId,
traits::{Block as BlockT, Header as HeaderT, Zero},
};
use sp_state_machine::InspectState;
use cumulus_client_consensus_common::ParachainConsensus;
use polkadot_node_primitives::{
@@ -36,9 +34,7 @@ use polkadot_node_primitives::{
};
use polkadot_node_subsystem::messages::{CollationGenerationMessage, CollatorProtocolMessage};
use polkadot_overseer::OverseerHandler;
use polkadot_primitives::v1::{
BlockNumber as PBlockNumber, CollatorPair, Hash as PHash, HeadData, Id as ParaId, UpwardMessage,
};
use polkadot_primitives::v1::{CollatorPair, Hash as PHash, HeadData, Id as ParaId};
use codec::{Decode, Encode};
use futures::{channel::oneshot, FutureExt};
@@ -50,36 +46,37 @@ use tracing::Instrument;
const LOG_TARGET: &str = "cumulus-collator";
/// The implementation of the Cumulus `Collator`.
pub struct Collator<Block: BlockT, BS, Backend> {
pub struct Collator<Block: BlockT, BS, RA> {
block_status: Arc<BS>,
parachain_consensus: Box<dyn ParachainConsensus<Block>>,
wait_to_announce: Arc<Mutex<WaitToAnnounce<Block>>>,
backend: Arc<Backend>,
runtime_api: Arc<RA>,
}
impl<Block: BlockT, BS, Backend> Clone for Collator<Block, BS, Backend> {
impl<Block: BlockT, BS, RA> Clone for Collator<Block, BS, RA> {
fn clone(&self) -> Self {
Self {
block_status: self.block_status.clone(),
wait_to_announce: self.wait_to_announce.clone(),
backend: self.backend.clone(),
parachain_consensus: self.parachain_consensus.clone(),
runtime_api: self.runtime_api.clone(),
}
}
}
impl<Block, BS, Backend> Collator<Block, BS, Backend>
impl<Block, BS, RA> Collator<Block, BS, RA>
where
Block: BlockT,
BS: BlockBackend<Block>,
Backend: sc_client_api::Backend<Block> + 'static,
RA: ProvideRuntimeApi<Block>,
RA::Api: CollectCollationInfo<Block>,
{
/// Create a new instance.
fn new(
block_status: Arc<BS>,
spawner: Arc<dyn SpawnNamed + Send + Sync>,
announce_block: Arc<dyn Fn(Block::Hash, Option<Vec<u8>>) + Send + Sync>,
backend: Arc<Backend>,
runtime_api: Arc<RA>,
parachain_consensus: Box<dyn ParachainConsensus<Block>>,
) -> Self {
let wait_to_announce = Arc::new(Mutex::new(WaitToAnnounce::new(spawner, announce_block)));
@@ -87,7 +84,7 @@ where
Self {
block_status,
wait_to_announce,
backend,
runtime_api,
parachain_consensus,
}
}
@@ -154,103 +151,35 @@ where
&mut self,
block: ParachainBlockData<Block>,
block_hash: Block::Hash,
relay_block_number: PBlockNumber,
) -> Option<Collation> {
let block_data = BlockData(block.encode());
let header = block.into_header();
let head_data = HeadData(header.encode());
let state = match self.backend.state_at(BlockId::Hash(block_hash)) {
Ok(state) => state,
let collation_info = match self
.runtime_api
.runtime_api()
.collect_collation_info(&BlockId::Hash(block_hash))
{
Ok(ci) => ci,
Err(e) => {
tracing::error!(
target: LOG_TARGET,
error = ?e,
"Failed to get state of the freshly built block.",
"Failed to collect collation info.",
);
return None;
}
};
state.inspect_state(|| {
let upward_messages = sp_io::storage::get(well_known_keys::UPWARD_MESSAGES);
let upward_messages =
match upward_messages.map(|v| Vec::<UpwardMessage>::decode(&mut &v[..])) {
Some(Ok(msgs)) => msgs,
Some(Err(e)) => {
tracing::error!(
target: LOG_TARGET,
error = ?e,
"Failed to decode upward messages from the build block.",
);
return None;
}
None => Vec::new(),
};
let new_validation_code = sp_io::storage::get(well_known_keys::NEW_VALIDATION_CODE);
let processed_downward_messages =
sp_io::storage::get(well_known_keys::PROCESSED_DOWNWARD_MESSAGES);
let processed_downward_messages =
match processed_downward_messages.map(|v| u32::decode(&mut &v[..])) {
Some(Ok(processed_cnt)) => processed_cnt,
Some(Err(e)) => {
tracing::error!(
target: LOG_TARGET,
error = ?e,
"Failed to decode the count of processed downward message.",
);
return None;
}
None => 0,
};
let horizontal_messages = sp_io::storage::get(well_known_keys::HRMP_OUTBOUND_MESSAGES);
let horizontal_messages = match horizontal_messages
.map(|v| Vec::<OutboundHrmpMessage>::decode(&mut &v[..]))
{
Some(Ok(horizontal_messages)) => horizontal_messages,
Some(Err(e)) => {
tracing::error!(
target: LOG_TARGET,
error = ?e,
"Failed to decode the horizontal messages.",
);
return None;
}
None => Vec::new(),
};
let hrmp_watermark = sp_io::storage::get(well_known_keys::HRMP_WATERMARK);
let hrmp_watermark = match hrmp_watermark.map(|v| PBlockNumber::decode(&mut &v[..])) {
Some(Ok(hrmp_watermark)) => hrmp_watermark,
Some(Err(e)) => {
tracing::error!(
target: LOG_TARGET,
error = ?e,
"Failed to decode the HRMP watermark."
);
return None;
}
None => {
// If the runtime didn't set `HRMP_WATERMARK`, then it means no messages were
// supplied via the message ingestion inherent. Assuming that the PVF/runtime
// checks that there legitimately are no pending messages, we can therefore move
// the watermark up to the relay-block number.
relay_block_number
}
};
Some(Collation {
upward_messages,
new_validation_code: new_validation_code.map(Into::into),
head_data,
proof_of_validity: PoV { block_data },
processed_downward_messages,
horizontal_messages,
hrmp_watermark,
})
Some(Collation {
upward_messages: collation_info.upward_messages,
new_validation_code: collation_info.new_validation_code,
processed_downward_messages: collation_info.processed_downward_messages,
horizontal_messages: collation_info.horizontal_messages,
hrmp_watermark: collation_info.hrmp_watermark,
head_data,
proof_of_validity: PoV { block_data },
})
}
@@ -308,7 +237,7 @@ where
);
let block_hash = b.header().hash();
let collation = self.build_collation(b, block_hash, validation_data.relay_parent_number)?;
let collation = self.build_collation(b, block_hash)?;
let (result_sender, signed_stmt_recv) = oneshot::channel();
@@ -330,9 +259,9 @@ where
}
/// Parameters for [`start_collator`].
pub struct StartCollatorParams<Block: BlockT, Backend, BS, Spawner> {
pub struct StartCollatorParams<Block: BlockT, RA, BS, Spawner> {
pub para_id: ParaId,
pub backend: Arc<Backend>,
pub runtime_api: Arc<RA>,
pub block_status: Arc<BS>,
pub announce_block: Arc<dyn Fn(Block::Hash, Option<Vec<u8>>) + Send + Sync>,
pub overseer_handler: OverseerHandler,
@@ -342,7 +271,7 @@ pub struct StartCollatorParams<Block: BlockT, Backend, BS, Spawner> {
}
/// Start the collator.
pub async fn start_collator<Block, Backend, BS, Spawner>(
pub async fn start_collator<Block, RA, BS, Spawner>(
StartCollatorParams {
para_id,
block_status,
@@ -351,19 +280,20 @@ pub async fn start_collator<Block, Backend, BS, Spawner>(
spawner,
key,
parachain_consensus,
backend,
}: StartCollatorParams<Block, Backend, BS, Spawner>,
runtime_api,
}: StartCollatorParams<Block, RA, BS, Spawner>,
) where
Block: BlockT,
Backend: sc_client_api::Backend<Block> + 'static,
BS: BlockBackend<Block> + Send + Sync + 'static,
Spawner: SpawnNamed + Clone + Send + Sync + 'static,
RA: ProvideRuntimeApi<Block> + Send + Sync + 'static,
RA::Api: CollectCollationInfo<Block>,
{
let collator = Collator::new(
block_status,
Arc::new(spawner),
announce_block,
backend,
runtime_api,
parachain_consensus,
);
@@ -400,13 +330,15 @@ mod tests {
use cumulus_test_runtime::{Block, Header};
use futures::{channel::mpsc, executor::block_on, StreamExt};
use polkadot_node_subsystem_test_helpers::ForwardSubsystem;
use polkadot_overseer::{AllSubsystems, Overseer, HeadSupportsParachains};
use polkadot_overseer::{AllSubsystems, HeadSupportsParachains, Overseer};
use sp_consensus::BlockOrigin;
use sp_core::{testing::TaskExecutor, Pair};
struct AlwaysSupportsParachains;
impl HeadSupportsParachains for AlwaysSupportsParachains {
fn head_supports_parachains(&self, _head: &PHash) -> bool { true }
fn head_supports_parachains(&self, _head: &PHash) -> bool {
true
}
}
#[derive(Clone)]
@@ -450,9 +382,7 @@ mod tests {
let spawner = TaskExecutor::new();
let para_id = ParaId::from(100);
let announce_block = |_, _| ();
let client_builder = TestClientBuilder::new();
let backend = client_builder.backend();
let client = Arc::new(client_builder.build());
let client = Arc::new(TestClientBuilder::new().build());
let header = client.header(&BlockId::Number(0)).unwrap().unwrap();
let (sub_tx, sub_rx) = mpsc::channel(64);
@@ -465,12 +395,13 @@ mod tests {
None,
AlwaysSupportsParachains,
spawner.clone(),
).expect("Creates overseer");
)
.expect("Creates overseer");
spawner.spawn("overseer", overseer.run().then(|_| async { () }).boxed());
let collator_start = start_collator(StartCollatorParams {
backend,
runtime_api: client.clone(),
block_status: client.clone(),
announce_block: Arc::new(announce_block),
overseer_handler: handler,
+8 -7
View File
@@ -19,7 +19,7 @@
//! Provides functions for starting a collator node or a normal full node.
use cumulus_client_consensus_common::ParachainConsensus;
use cumulus_primitives_core::ParaId;
use cumulus_primitives_core::{CollectCollationInfo, ParaId};
use futures::FutureExt;
use polkadot_primitives::v1::{Block as PBlock, CollatorPair};
use polkadot_service::{AbstractClient, Client as PClient, ClientHandle, RuntimeApiCollection};
@@ -28,6 +28,7 @@ use sc_client_api::{
};
use sc_service::{error::Result as ServiceResult, Configuration, Role, TaskManager};
use sc_telemetry::TelemetryWorkerHandle;
use sp_api::ProvideRuntimeApi;
use sp_blockchain::HeaderBackend;
use sp_consensus::BlockImport;
use sp_core::traits::SpawnNamed;
@@ -40,8 +41,7 @@ pub mod genesis;
type RFullNode<C> = polkadot_service::NewFull<C>;
/// Parameters given to [`start_collator`].
pub struct StartCollatorParams<'a, Block: BlockT, BS, Client, Backend, Spawner, RClient> {
pub backend: Arc<Backend>,
pub struct StartCollatorParams<'a, Block: BlockT, BS, Client, Spawner, RClient> {
pub block_status: Arc<BS>,
pub client: Arc<Client>,
pub announce_block: Arc<dyn Fn(Block::Hash, Option<Vec<u8>>) + Send + Sync>,
@@ -60,7 +60,6 @@ pub struct StartCollatorParams<'a, Block: BlockT, BS, Client, Backend, Spawner,
/// parachain validator for validation and inclusion into the relay chain.
pub async fn start_collator<'a, Block, BS, Client, Backend, Spawner, RClient>(
StartCollatorParams {
backend,
block_status,
client,
announce_block,
@@ -70,7 +69,7 @@ pub async fn start_collator<'a, Block, BS, Client, Backend, Spawner, RClient>(
task_manager,
relay_chain_full_node,
parachain_consensus,
}: StartCollatorParams<'a, Block, BS, Client, Backend, Spawner, RClient>,
}: StartCollatorParams<'a, Block, BS, Client, Spawner, RClient>,
) -> sc_service::error::Result<()>
where
Block: BlockT,
@@ -82,11 +81,13 @@ where
+ Sync
+ BlockBackend<Block>
+ BlockchainEvents<Block>
+ ProvideRuntimeApi<Block>
+ 'static,
Client::Api: CollectCollationInfo<Block>,
for<'b> &'b Client: BlockImport<Block>,
Backend: BackendT<Block> + 'static,
Spawner: SpawnNamed + Clone + Send + Sync + 'static,
RClient: ClientHandle,
Backend: BackendT<Block> + 'static,
{
relay_chain_full_node.client.execute_with(StartConsensus {
para_id,
@@ -97,7 +98,7 @@ where
})?;
cumulus_client_collator::start_collator(cumulus_client_collator::StartCollatorParams {
backend,
runtime_api: client.clone(),
block_status,
announce_block,
overseer_handler: relay_chain_full_node
+81 -39
View File
@@ -28,8 +28,7 @@
//! Users must ensure that they register this pallet as an inherent provider.
use cumulus_primitives_core::{
relay_chain,
well_known_keys::{self, NEW_VALIDATION_CODE},
relay_chain, CollationInfo,
AbridgedHostConfiguration, ChannelStatus, DmpMessageHandler, GetChannelInfo,
InboundDownwardMessage, InboundHrmpMessage, MessageSendError, OnValidationData,
OutboundHrmpMessage, ParaId, PersistedValidationData, UpwardMessage, UpwardMessageSender,
@@ -162,7 +161,7 @@ pub mod pallet {
// TODO: #274 Return back messages that do not longer fit into the queue.
storage::unhashed::put(well_known_keys::UPWARD_MESSAGES, &up[0..num]);
UpwardMessages::<T>::put(&up[..num]);
*up = up.split_off(num);
});
@@ -180,39 +179,35 @@ pub mod pallet {
.min(<AnnouncedHrmpMessagesPerCandidate<T>>::take()) as usize;
let outbound_messages =
T::OutboundXcmpMessageSource::take_outbound_messages(maximum_channels);
T::OutboundXcmpMessageSource::take_outbound_messages(maximum_channels)
.into_iter()
.map(|(recipient, data)| OutboundHrmpMessage {
recipient,
data,
}).collect::<Vec<_>>();
// Note conversion to the `OutboundHrmpMessage` isn't needed since the data that
// `take_outbound_messages` returns encodes equivalently.
//
// The following code is a smoke test to check that the `OutboundHrmpMessage` type
// doesn't accidentally change (e.g. by having a field added to it). If the following
// line breaks, then we'll need to revisit the assumption that the result of
// `take_outbound_messages` can be placed into `HRMP_OUTBOUND_MESSAGES` directly
// without a decode/encode round-trip.
let _ = OutboundHrmpMessage {
recipient: ParaId::from(0),
data: vec![],
};
storage::unhashed::put(well_known_keys::HRMP_OUTBOUND_MESSAGES, &outbound_messages);
HrmpOutboundMessages::<T>::put(outbound_messages);
}
fn on_initialize(_n: T::BlockNumber) -> Weight {
// To prevent removing `NEW_VALIDATION_CODE` that was set by another `on_initialize`
let mut weight = 0;
// To prevent removing `NewValidationCode` that was set by another `on_initialize`
// like for example from scheduler, we only kill the storage entry if it was not yet
// updated in the current block.
if !<DidSetValidationCode<T>>::get() {
storage::unhashed::kill(NEW_VALIDATION_CODE);
NewValidationCode::<T>::kill();
weight += T::DbWeight::get().writes(1);
}
// Remove the validation from the old block.
<ValidationData<T>>::kill();
ProcessedDownwardMessages::<T>::kill();
HrmpWatermark::<T>::kill();
UpwardMessages::<T>::kill();
HrmpOutboundMessages::<T>::kill();
let mut weight = T::DbWeight::get().writes(3);
storage::unhashed::kill(well_known_keys::HRMP_WATERMARK);
storage::unhashed::kill(well_known_keys::UPWARD_MESSAGES);
storage::unhashed::kill(well_known_keys::HRMP_OUTBOUND_MESSAGES);
weight += T::DbWeight::get().writes(5);
// Here, in `on_initialize` we must report the weight for both `on_initialize` and
// `on_finalize`.
@@ -306,9 +301,9 @@ pub mod pallet {
if let Some(apply_block) = <PendingRelayChainBlockNumber<T>>::get() {
if vfp.relay_parent_number >= apply_block {
<PendingRelayChainBlockNumber<T>>::kill();
let validation_function = <PendingValidationFunction<T>>::take();
let validation_code = <PendingValidationCode<T>>::take();
<LastUpgrade<T>>::put(&apply_block);
Self::put_parachain_code(&validation_function);
Self::put_parachain_code(&validation_code);
Self::deposit_event(Event::ValidationFunctionApplied(vfp.relay_parent_number));
}
}
@@ -340,6 +335,7 @@ pub mod pallet {
total_weight += Self::process_inbound_horizontal_messages(
&relevant_messaging_state.ingress_channels,
horizontal_messages,
vfp.relay_parent_number,
);
Ok(PostDispatchInfo {
@@ -419,7 +415,7 @@ pub mod pallet {
/// We need to store the new validation function for the span between
/// setting it and applying it. If it has a
/// value, then [`PendingValidationFunction`] must have a real value, and
/// value, then [`PendingValidationCode`] must have a real value, and
/// together will coordinate the block number where the upgrade will happen.
#[pallet::storage]
pub(super) type PendingRelayChainBlockNumber<T: Config> =
@@ -430,7 +426,7 @@ pub mod pallet {
/// exist here as long as [`PendingRelayChainBlockNumber`] is set.
#[pallet::storage]
#[pallet::getter(fn new_validation_function)]
pub(super) type PendingValidationFunction<T: Config> = StorageValue<_, Vec<u8>, ValueQuery>;
pub(super) type PendingValidationCode<T: Config> = StorageValue<_, Vec<u8>, ValueQuery>;
/// The [`PersistedValidationData`] set for this block.
#[pallet::storage]
@@ -481,6 +477,40 @@ pub mod pallet {
pub(super) type LastHrmpMqcHeads<T: Config> =
StorageValue<_, BTreeMap<ParaId, MessageQueueChain>, ValueQuery>;
/// Number of downward messages processed in a block.
///
/// This will be cleared in `on_initialize` of each new block.
#[pallet::storage]
pub(super) type ProcessedDownwardMessages<T: Config> = StorageValue<_, u32, ValueQuery>;
/// New validation code that was set in a block.
///
/// This will be cleared in `on_initialize` of each new block if no other pallet already set
/// the value.
#[pallet::storage]
pub(super) type NewValidationCode<T: Config> = StorageValue<_, Vec<u8>, OptionQuery>;
/// HRMP watermark that was set in a block.
///
/// This will be cleared in `on_initialize` of each new block.
#[pallet::storage]
pub(super) type HrmpWatermark<T: Config> =
StorageValue<_, relay_chain::v1::BlockNumber, ValueQuery>;
/// HRMP messages that were sent in a block.
///
/// This will be cleared in `on_initialize` of each new block.
#[pallet::storage]
pub(super) type HrmpOutboundMessages<T: Config> =
StorageValue<_, Vec<OutboundHrmpMessage>, ValueQuery>;
/// Upward messages that were sent in a block.
///
/// This will be cleared in `on_initialize` of each new block.
#[pallet::storage]
pub(super) type UpwardMessages<T: Config> = StorageValue<_, Vec<UpwardMessage>, ValueQuery>;
/// Upward messages that are still pending and not yet sent to the relay chain.
#[pallet::storage]
pub(super) type PendingUpwardMessages<T: Config> =
StorageValue<_, Vec<UpwardMessage>, ValueQuery>;
@@ -673,9 +703,7 @@ impl<T: Config> Pallet<T> {
// added improperly.
assert_eq!(dmq_head.0, expected_dmq_mqc_head);
// Store the processed_downward_messages here so that it will be accessible from
// PVF's `validate_block` wrapper and collation pipeline.
storage::unhashed::put(well_known_keys::PROCESSED_DOWNWARD_MESSAGES, &dm_count);
ProcessedDownwardMessages::<T>::put(dm_count);
weight_used
}
@@ -692,6 +720,7 @@ impl<T: Config> Pallet<T> {
fn process_inbound_horizontal_messages(
ingress_channels: &[(ParaId, cumulus_primitives_core::AbridgedHrmpChannel)],
horizontal_messages: BTreeMap<ParaId, Vec<InboundHrmpMessage>>,
relay_parent_number: relay_chain::v1::BlockNumber,
) -> Weight {
// First, check that all submitted messages are sent from channels that exist. The
// channel exists if its MQC head is present in `vfp.hrmp_mqc_heads`.
@@ -776,10 +805,9 @@ impl<T: Config> Pallet<T> {
<LastHrmpMqcHeads<T>>::put(running_mqc_heads);
// If we processed at least one message, then advance watermark to that location.
if let Some(hrmp_watermark) = hrmp_watermark {
storage::unhashed::put(well_known_keys::HRMP_WATERMARK, &hrmp_watermark);
}
// If we processed at least one message, then advance watermark to that location or if there
// were no messages, set it to the block number of the relay parent.
HrmpWatermark::<T>::put(hrmp_watermark.unwrap_or(relay_parent_number));
weight_used
}
@@ -788,7 +816,7 @@ impl<T: Config> Pallet<T> {
/// monitors for updates. Calling this function notifies polkadot that a new
/// upgrade has been scheduled.
fn notify_polkadot_of_pending_upgrade(code: &[u8]) {
storage::unhashed::put_raw(NEW_VALIDATION_CODE, code);
NewValidationCode::<T>::put(code);
<DidSetValidationCode<T>>::put(true);
}
@@ -831,7 +859,7 @@ impl<T: Config> Pallet<T> {
/// The implementation of the runtime upgrade functionality for parachains.
fn set_code_impl(validation_function: Vec<u8>) -> DispatchResult {
ensure!(
!<PendingValidationFunction<T>>::exists(),
!<PendingValidationCode<T>>::exists(),
Error::<T>::OverlappingUpgrades
);
let vfp = Self::validation_data().ok_or(Error::<T>::ValidationDataNotAvailable)?;
@@ -848,16 +876,30 @@ impl<T: Config> Pallet<T> {
// places, synchronized: both polkadot and the individual parachain
// have to upgrade on the same relay chain block.
//
// `notify_polkadot_of_pending_upgrade` notifies polkadot; the `PendingValidationFunction`
// `notify_polkadot_of_pending_upgrade` notifies polkadot; the `PendingValidationCode`
// storage keeps track locally for the parachain upgrade, which will
// be applied later.
Self::notify_polkadot_of_pending_upgrade(&validation_function);
<PendingRelayChainBlockNumber<T>>::put(apply_block);
<PendingValidationFunction<T>>::put(validation_function);
<PendingValidationCode<T>>::put(validation_function);
Self::deposit_event(Event::ValidationFunctionStored(apply_block));
Ok(())
}
/// Returns the [`CollationInfo`] of the current active block.
///
/// This is expected to be used by the
/// [`CollectCollationInfo`](cumulus_primitives_core::CollectCollationInfo) runtime api.
pub fn collect_collation_info() -> CollationInfo {
CollationInfo {
hrmp_watermark: HrmpWatermark::<T>::get(),
horizontal_messages: HrmpOutboundMessages::<T>::get(),
upward_messages: UpwardMessages::<T>::get(),
processed_downward_messages: ProcessedDownwardMessages::<T>::get(),
new_validation_code: NewValidationCode::<T>::get().map(Into::into),
}
}
}
pub struct ParachainSetCode<T>(sp_std::marker::PhantomData<T>);
+18 -25
View File
@@ -25,7 +25,6 @@ use frame_support::{
assert_ok,
dispatch::UnfilteredDispatchable,
parameter_types,
storage,
traits::{OnFinalize, OnInitialize},
weights::Weight,
inherent::{InherentData, ProvideInherent},
@@ -338,7 +337,7 @@ impl BlockTests {
}
<ValidationData<Test>>::put(&vfp);
storage::unhashed::kill(NEW_VALIDATION_CODE);
NewValidationCode::<Test>::kill();
// It is insufficient to push the validation function params
// to storage; they must also be included in the inherent data.
@@ -372,7 +371,7 @@ impl BlockTests {
ParachainSystem::on_finalize(*n);
// did block execution set new validation code?
if storage::unhashed::exists(NEW_VALIDATION_CODE) {
if NewValidationCode::<Test>::exists() {
if self.pending_upgrade.is_some() {
panic!("attempted to set validation code while upgrade was pending");
}
@@ -464,7 +463,7 @@ fn manipulates_storage() {
BlockTests::new()
.add(123, || {
assert!(
!<PendingValidationFunction<Test>>::exists(),
!<PendingValidationCode<Test>>::exists(),
"validation function must not exist yet"
);
assert_ok!(System::set_code(
@@ -472,7 +471,7 @@ fn manipulates_storage() {
Default::default()
));
assert!(
<PendingValidationFunction<Test>>::exists(),
<PendingValidationCode<Test>>::exists(),
"validation function must now exist"
);
})
@@ -481,7 +480,7 @@ fn manipulates_storage() {
|| {},
|| {
assert!(
!<PendingValidationFunction<Test>>::exists(),
!<PendingValidationCode<Test>>::exists(),
"validation function must have been unset"
);
},
@@ -516,18 +515,16 @@ fn send_upward_message_num_per_candidate() {
ParachainSystem::send_upward_message(b"message 2".to_vec()).unwrap();
},
|| {
let v: Option<Vec<Vec<u8>>> =
storage::unhashed::get(well_known_keys::UPWARD_MESSAGES);
assert_eq!(v, Some(vec![b"Mr F was here".to_vec()]),);
let v = UpwardMessages::<Test>::get();
assert_eq!(v, vec![b"Mr F was here".to_vec()]);
},
)
.add_with_post_test(
2,
|| { /* do nothing within block */ },
|| {
let v: Option<Vec<Vec<u8>>> =
storage::unhashed::get(well_known_keys::UPWARD_MESSAGES);
assert_eq!(v, Some(vec![b"message 2".to_vec()]),);
let v = UpwardMessages::<Test>::get();
assert_eq!(v, vec![b"message 2".to_vec()]);
},
);
}
@@ -552,18 +549,16 @@ fn send_upward_message_relay_bottleneck() {
},
|| {
// The message won't be sent because there is already one message in queue.
let v: Option<Vec<Vec<u8>>> =
storage::unhashed::get(well_known_keys::UPWARD_MESSAGES);
assert_eq!(v, Some(vec![]),);
let v = UpwardMessages::<Test>::get();
assert!(v.is_empty());
},
)
.add_with_post_test(
2,
|| { /* do nothing within block */ },
|| {
let v: Option<Vec<Vec<u8>>> =
storage::unhashed::get(well_known_keys::UPWARD_MESSAGES);
assert_eq!(v, Some(vec![vec![0u8; 8]]),);
let v = UpwardMessages::<Test>::get();
assert_eq!(v, vec![vec![0u8; 8]]);
},
);
}
@@ -656,23 +651,21 @@ fn send_hrmp_message_buffer_channel_close() {
|| {},
|| {
// both channels are at capacity so we do not expect any messages.
let v: Option<Vec<OutboundHrmpMessage>> =
storage::unhashed::get(well_known_keys::HRMP_OUTBOUND_MESSAGES);
assert_eq!(v, Some(vec![]));
let v = HrmpOutboundMessages::<Test>::get();
assert!(v.is_empty());
},
)
.add_with_post_test(
3,
|| {},
|| {
let v: Option<Vec<OutboundHrmpMessage>> =
storage::unhashed::get(well_known_keys::HRMP_OUTBOUND_MESSAGES);
let v = HrmpOutboundMessages::<Test>::get();
assert_eq!(
v,
Some(vec![OutboundHrmpMessage {
vec![OutboundHrmpMessage {
recipient: ParaId::from(300),
data: b"1".to_vec(),
}])
}]
);
},
);
@@ -24,19 +24,10 @@ use sp_std::prelude::*;
use hash_db::{HashDB, EMPTY_PREFIX};
use polkadot_parachain::primitives::{
HeadData, ValidationCode, ValidationParams, ValidationResult,
};
use polkadot_parachain::primitives::{HeadData, ValidationParams, ValidationResult};
use codec::{Decode, Encode};
use cumulus_primitives_core::{
well_known_keys::{
HRMP_OUTBOUND_MESSAGES, HRMP_WATERMARK, NEW_VALIDATION_CODE, PROCESSED_DOWNWARD_MESSAGES,
UPWARD_MESSAGES,
},
OutboundHrmpMessage, UpwardMessage,
};
use sp_core::storage::ChildInfo;
use sp_externalities::{set_and_run_with_externalities, Externalities};
use sp_trie::MemoryDB;
@@ -52,8 +43,6 @@ fn with_externalities<F: FnOnce(&mut dyn Externalities) -> R, R>(f: F) -> R {
sp_externalities::with_externalities(f).expect("Environmental externalities not set.")
}
type ParachainSystem<PSC> = crate::Module::<PSC>;
/// Validate a given parachain block on a validator.
#[doc(hidden)]
pub fn validate_block<B: BlockT, E: ExecuteBlock<B>, PSC: crate::Config>(
@@ -126,59 +115,26 @@ pub fn validate_block<B: BlockT, E: ExecuteBlock<B>, PSC: crate::Config>(
sp_io::offchain_index::host_clear.replace_implementation(host_offchain_index_clear),
);
let validation_data = set_and_run_with_externalities(&mut ext, || {
set_and_run_with_externalities(&mut ext, || {
super::set_and_run_with_validation_params(params, || {
E::execute_block(block);
ParachainSystem::<PSC>::validation_data()
.expect("`PersistedValidationData` should be set in every block!")
let new_validation_code = crate::NewValidationCode::<PSC>::get();
let upward_messages = crate::UpwardMessages::<PSC>::get();
let processed_downward_messages = crate::ProcessedDownwardMessages::<PSC>::get();
let horizontal_messages = crate::HrmpOutboundMessages::<PSC>::get();
let hrmp_watermark = crate::HrmpWatermark::<PSC>::get();
ValidationResult {
head_data,
new_validation_code: new_validation_code.map(Into::into),
upward_messages,
processed_downward_messages,
horizontal_messages,
hrmp_watermark,
}
})
});
// If in the course of block execution new validation code was set, insert
// its scheduled upgrade so we can validate that block number later.
let new_validation_code = overlay
.storage(NEW_VALIDATION_CODE)
.flatten()
.map(|slice| slice.to_vec())
.map(ValidationCode);
// Extract potential upward messages from the storage.
let upward_messages = match overlay.storage(UPWARD_MESSAGES).flatten() {
Some(encoded) => Vec::<UpwardMessage>::decode(&mut &encoded[..])
.expect("Upward messages vec is not correctly encoded in the storage!"),
None => Vec::new(),
};
let processed_downward_messages = overlay
.storage(PROCESSED_DOWNWARD_MESSAGES)
.flatten()
.map(|v| {
Decode::decode(&mut &v[..])
.expect("Processed downward message count is not correctly encoded in the storage")
})
.unwrap_or_default();
let horizontal_messages = match overlay.storage(HRMP_OUTBOUND_MESSAGES).flatten() {
Some(encoded) => Vec::<OutboundHrmpMessage>::decode(&mut &encoded[..])
.expect("Outbound HRMP messages vec is not correctly encoded in the storage!"),
None => Vec::new(),
};
let hrmp_watermark = overlay
.storage(HRMP_WATERMARK)
.flatten()
.map(|v| Decode::decode(&mut &v[..]).expect("HRMP watermark is not encoded correctly"))
.unwrap_or(validation_data.relay_parent_number);
ValidationResult {
head_data,
new_validation_code,
upward_messages,
processed_downward_messages,
horizontal_messages,
hrmp_watermark,
}
})
}
fn host_storage_read(key: &[u8], value_out: &mut [u8], value_offset: u32) -> Option<u32> {
@@ -579,6 +579,12 @@ impl_runtime_apis! {
Aura::authorities()
}
}
impl cumulus_primitives_core::CollectCollationInfo<Block> for Runtime {
fn collect_collation_info() -> cumulus_primitives_core::CollationInfo {
ParachainSystem::collect_collation_info()
}
}
}
cumulus_pallet_parachain_system::register_validate_block!(
@@ -360,6 +360,12 @@ impl_runtime_apis! {
Vec::new()
}
}
impl cumulus_primitives_core::CollectCollationInfo<Block> for Runtime {
fn collect_collation_info() -> cumulus_primitives_core::CollationInfo {
ParachainSystem::collect_collation_info()
}
}
}
cumulus_pallet_parachain_system::register_validate_block!(Runtime, Executive);
+2 -2
View File
@@ -180,7 +180,8 @@ where
Block,
StateBackend = sc_client_api::StateBackendFor<TFullBackend<Block>, Block>,
> + sp_offchain::OffchainWorkerApi<Block>
+ sp_block_builder::BlockBuilder<Block>,
+ sp_block_builder::BlockBuilder<Block>
+ cumulus_primitives_core::CollectCollationInfo<Block>,
sc_client_api::StateBackendFor<TFullBackend<Block>, Block>: sp_api::StateBackend<BlakeTwo256>,
Executor: sc_executor::NativeExecutionDispatch + 'static,
RB: Fn(
@@ -302,7 +303,6 @@ where
collator_key,
relay_chain_full_node,
spawner,
backend,
parachain_consensus,
};
+2 -1
View File
@@ -7,6 +7,7 @@ edition = "2018"
[dependencies]
# Substrate dependencies
sp-std = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" }
sp-api = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" }
sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" }
sp-trie = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" }
frame-support = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" }
@@ -21,7 +22,6 @@ xcm = { git = "https://github.com/paritytech/polkadot", default-features = false
impl-trait-for-tuples = "0.2.1"
codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = [ "derive" ] }
[features]
default = [ "std" ]
std = [
@@ -32,5 +32,6 @@ std = [
"polkadot-core-primitives/std",
"sp-runtime/std",
"sp-trie/std",
"sp-api/std",
"frame-support/std",
]
+25 -29
View File
@@ -89,34 +89,6 @@ pub trait GetChannelInfo {
fn get_channel_max(id: ParaId) -> Option<usize>;
}
/// Well known keys for values in the storage.
pub mod well_known_keys {
/// The storage key for the upward messages.
///
/// The upward messages are stored as SCALE encoded `Vec<UpwardMessage>`.
pub const UPWARD_MESSAGES: &'static [u8] = b":cumulus_upward_messages:";
/// Code upgrade (set as appropriate by a pallet).
pub const NEW_VALIDATION_CODE: &'static [u8] = b":cumulus_new_validation_code:";
/// The storage key with which the runtime passes outbound HRMP messages it wants to send to the
/// PVF.
///
/// The value is stored as SCALE encoded `Vec<OutboundHrmpMessage>`
pub const HRMP_OUTBOUND_MESSAGES: &'static [u8] = b":cumulus_hrmp_outbound_messages:";
/// The storage key for communicating the HRMP watermark from the runtime to the PVF. Cleared by
/// the runtime each block and set after message inclusion, but only if there were messages.
///
/// The value is stored as SCALE encoded relay-chain's `BlockNumber`.
pub const HRMP_WATERMARK: &'static [u8] = b":cumulus_hrmp_watermark:";
/// The storage key for the processed downward messages.
///
/// The value is stored as SCALE encoded `u32`.
pub const PROCESSED_DOWNWARD_MESSAGES: &'static [u8] = b":cumulus_processed_downward_messages:";
}
/// Something that should be called when a downward message is received.
pub trait DmpMessageHandler {
/// Handle some incoming DMP messages (note these are individual XCM messages).
@@ -132,7 +104,7 @@ impl DmpMessageHandler for () {
iter: impl Iterator<Item=(RelayBlockNumber, Vec<u8>)>,
_max_weight: Weight,
) -> Weight {
for _ in iter {}
iter.for_each(drop);
0
}
}
@@ -268,3 +240,27 @@ impl<B: BlockT> ParachainBlockData<B> {
(self.header, self.extrinsics, self.storage_proof)
}
}
/// Information about a collation.
#[derive(Clone, Debug, codec::Decode, codec::Encode, PartialEq)]
pub struct CollationInfo {
/// Messages destined to be interpreted by the Relay chain itself.
pub upward_messages: Vec<UpwardMessage>,
/// The horizontal messages sent by the parachain.
pub horizontal_messages: Vec<OutboundHrmpMessage>,
/// New validation code.
pub new_validation_code: Option<relay_chain::v1::ValidationCode>,
/// The number of messages processed from the DMQ.
pub processed_downward_messages: u32,
/// The mark which specifies the block number up to which all inbound HRMP messages are processed.
pub hrmp_watermark: relay_chain::v1::BlockNumber,
}
sp_api::decl_runtime_apis! {
/// Runtime api to collect information about a collation.
pub trait CollectCollationInfo {
/// Collect information about a collation.
#[skip_initialize_block]
fn collect_collation_info() -> CollationInfo;
}
}
+6
View File
@@ -395,6 +395,12 @@ impl_runtime_apis! {
UpgradeDetection::get()
}
}
impl cumulus_primitives_core::CollectCollationInfo<Block> for Runtime {
fn collect_collation_info() -> cumulus_primitives_core::CollationInfo {
ParachainSystem::collect_collation_info()
}
}
}
cumulus_pallet_parachain_system::register_validate_block!(Runtime, Executive);
-1
View File
@@ -266,7 +266,6 @@ where
relay_chain_full_node.with_client(polkadot_test_service::TestClient);
let params = StartCollatorParams {
backend: params.backend,
block_status: client.clone(),
announce_block,
client: client.clone(),