DMP Queue pallet (#416)

* Introduce the converter into the hub

* Parachain recognises Rococo governance body as admin

* Whitespace

* Use UsingComponents for fee payment in XCM

* Fixes

* Fixes for XCM permissions

* Remove encode_call test

* Fixes

* Rococo Collator supports Shell runtime

* Fixes

* Fixes

* Initial draft of DMP Queue pallet

* DMP Queue builds.

* Companion for Polkadot gav-allow-xcm-exec

* Bump

* Fix std

* Fixes

* fix and improve docs

* fix compile errors in tests

* add test for try_service_message

* update cargo.lock

* Fixes

* Make test name read well

* Fixes

* Add a couple of simple tests

* Tests

* Tests

* Update pallets/dmp-queue/src/lib.rs

Co-authored-by: Alexander Popiak <alexander.popiak@parity.io>

* Update pallets/dmp-queue/src/lib.rs

Co-authored-by: Alexander Popiak <alexander.popiak@parity.io>

* Update pallets/dmp-queue/src/lib.rs

Co-authored-by: Alexander Popiak <alexander.popiak@parity.io>

* Update pallets/dmp-queue/src/lib.rs

Co-authored-by: Alexander Popiak <alexander.popiak@parity.io>

* Update pallets/dmp-queue/src/lib.rs

Co-authored-by: Alexander Popiak <alexander.popiak@parity.io>

* Update pallets/dmp-queue/src/lib.rs

Co-authored-by: Alexander Popiak <alexander.popiak@parity.io>

* Update pallets/dmp-queue/src/lib.rs

Co-authored-by: Alexander Popiak <alexander.popiak@parity.io>

* Chain ID and ParaID don't collide

* Fixes

* Update pallets/dmp-queue/src/lib.rs

Co-authored-by: Shawn Tabrizi <shawntabrizi@gmail.com>

* Update pallets/dmp-queue/src/lib.rs

Co-authored-by: Shawn Tabrizi <shawntabrizi@gmail.com>

* Fixes

Co-authored-by: Alexander Popiak <alexander.popiak@parity.io>
Co-authored-by: Shawn Tabrizi <shawntabrizi@gmail.com>
This commit is contained in:
Gavin Wood
2021-05-02 16:11:58 +02:00
committed by GitHub
parent 67102885dd
commit dd5ad841a0
14 changed files with 1279 additions and 369 deletions
+253 -220
View File
File diff suppressed because it is too large Load Diff
+1
View File
@@ -5,6 +5,7 @@ members = [
"client/consensus/relay-chain",
"client/network",
"client/service",
"pallets/dmp-queue",
"pallets/parachain-system",
"pallets/xcm",
"pallets/xcmp-queue",
+45
View File
@@ -0,0 +1,45 @@
[package]
name = "cumulus-pallet-dmp-queue"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
edition = "2018"
[dependencies]
# Other dependencies
codec = { package = "parity-scale-codec", version = "2.0.0", features = [ "derive" ], default-features = false }
log = { version = "0.4.14", default-features = false }
rand = { version = "0.8.3", default-features = false }
rand_chacha = { version = "0.3.0", default-features = false }
# Substrate Dependencies
sp-std = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" }
sp-io = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" }
sp-runtime = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" }
frame-support = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" }
frame-system = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" }
# Polkadot Dependencies
xcm = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "master" }
xcm-executor = { git = "https://github.com/paritytech/polkadot", default-features = false, branch = "master" }
# Cumulus Dependencies
cumulus-primitives-core = { path = "../../primitives/core", default-features = false }
[dev-dependencies]
sp-core = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" }
sp-version = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" }
[features]
default = [ "std" ]
std = [
"codec/std",
"log/std",
"sp-std/std",
"sp-io/std",
"sp-runtime/std",
"frame-support/std",
"frame-system/std",
"cumulus-primitives-core/std",
"xcm/std",
"xcm-executor/std",
]
+786
View File
@@ -0,0 +1,786 @@
// Copyright 2020-2021 Parity Technologies (UK) Ltd.
// This file is part of Cumulus.
// Cumulus is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Cumulus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
//! Pallet implementing a message queue for downward messages from the relay-chain.
//! Executes downward messages if there is enough weight available and schedules the rest for later
//! execution (by `on_idle` or another `handle_dmp_messages` call). Individual overweight messages
//! are scheduled into a separate queue that is only serviced by explicit extrinsic calls.
#![cfg_attr(not(feature = "std"), no_std)]
use sp_std::{prelude::*, convert::TryFrom};
use cumulus_primitives_core::relay_chain::BlockNumber as RelayBlockNumber;
use cumulus_primitives_core::DmpMessageHandler;
use codec::{Encode, Decode};
use sp_runtime::RuntimeDebug;
use xcm::{VersionedXcm, v0::{Xcm, Junction, Outcome, ExecuteXcm, Error as XcmError}};
use frame_support::{traits::EnsureOrigin, dispatch::Weight, weights::constants::WEIGHT_PER_MILLIS};
pub use pallet::*;
/// Runtime-configurable parameters of the DMP queue, stored in the `Configuration` item.
#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)]
pub struct ConfigData {
	/// The maximum amount of weight any individual message may consume. Messages above this weight
	/// go into the overweight queue and may only be serviced explicitly by the
	/// `ExecuteOverweightOrigin`.
	max_individual: Weight,
}

impl Default for ConfigData {
	fn default() -> Self {
		Self {
			max_individual: 10 * WEIGHT_PER_MILLIS, // 10 ms of execution time maximum by default
		}
	}
}
/// Information concerning our message pages.
///
/// Pages live in the half-open range `begin_used..end_used`; both bounds are reset to zero
/// whenever the queue drains completely.
#[derive(Copy, Clone, Eq, PartialEq, Default, Encode, Decode, RuntimeDebug)]
pub struct PageIndexData {
	/// The lowest used page index.
	begin_used: PageCounter,
	/// The lowest unused page index.
	end_used: PageCounter,
	/// The number of overweight messages ever recorded (and thus the lowest free index).
	overweight_count: OverweightIndex,
}

/// Simple type used to identify messages for the purpose of reporting events. Secure if and only
/// if the message content is unique.
///
/// In practice this is the blake2-256 hash of the raw message data (see `try_service_message`).
pub type MessageId = [u8; 32];

/// Index used to identify overweight messages.
pub type OverweightIndex = u64;

/// Index used to identify normal pages.
pub type PageCounter = u32;
#[frame_support::pallet]
pub mod pallet {
use super::*;
use frame_support::pallet_prelude::*;
use frame_system::pallet_prelude::*;
#[pallet::pallet]
#[pallet::generate_store(pub(super) trait Store)]
pub struct Pallet<T>(_);
/// The module configuration trait.
#[pallet::config]
pub trait Config: frame_system::Config {
	/// The overarching event type.
	type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>;

	/// The XCM executor used to actually execute decoded downward messages.
	type XcmExecutor: ExecuteXcm<Self::Call>;

	/// Origin which is allowed to execute overweight messages.
	type ExecuteOverweightOrigin: EnsureOrigin<Self::Origin>;
}
/// The configuration.
#[pallet::storage]
pub(super) type Configuration<T> = StorageValue<_, ConfigData, ValueQuery>;

/// The page index.
#[pallet::storage]
pub(super) type PageIndex<T> = StorageValue<_, PageIndexData, ValueQuery>;

/// The queue pages.
///
/// Each page is a vector of `(sent_at, message_data)` pairs, keyed by its position in the
/// queue (`PageIndexData::begin_used..end_used`).
#[pallet::storage]
pub(super) type Pages<T> = StorageMap<
	_,
	Blake2_128Concat,
	PageCounter,
	Vec<(RelayBlockNumber, Vec<u8>)>,
	ValueQuery,
>;

/// The overweight messages.
///
/// Messages whose required weight exceeds `ConfigData::max_individual` are parked here and
/// can only be executed via the `service_overweight` extrinsic.
#[pallet::storage]
pub(super) type Overweight<T> = StorageMap<
	_,
	Blake2_128Concat,
	OverweightIndex,
	(RelayBlockNumber, Vec<u8>),
	OptionQuery,
>;

#[pallet::error]
pub enum Error<T> {
	/// The message index given is unknown.
	Unknown,
	/// The amount of weight given is possibly not enough for executing the message.
	OverLimit,
}
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
	/// Use any weight left over at the end of the block to drain the queue further.
	fn on_idle(_now: T::BlockNumber, max_weight: Weight) -> Weight {
		// on_idle processes additional messages with any remaining block weight.
		Self::service_queue(max_weight)
	}
}
#[pallet::call]
impl<T: Config> Pallet<T> {
	/// Service a single overweight message.
	///
	/// - `origin`: Must pass `ExecuteOverweightOrigin`.
	/// - `index`: The index of the overweight message to service.
	/// - `weight_limit`: The amount of weight that message execution may take.
	///
	/// Errors:
	/// - `Unknown`: Message of `index` is unknown.
	/// - `OverLimit`: Message execution may use greater than `weight_limit`.
	///
	/// Events:
	/// - `OverweightServiced`: On success.
	#[pallet::weight(weight_limit.saturating_add(1_000_000))]
	pub fn service_overweight(
		origin: OriginFor<T>,
		index: OverweightIndex,
		weight_limit: Weight,
	) -> DispatchResultWithPostInfo {
		T::ExecuteOverweightOrigin::ensure_origin(origin)?;
		let (sent_at, data) = Overweight::<T>::get(index).ok_or(Error::<T>::Unknown)?;
		let used = Self::try_service_message(weight_limit, sent_at, &data[..])
			.map_err(|_| Error::<T>::OverLimit)?;
		// Only remove the message from storage once it has actually been executed.
		Overweight::<T>::remove(index);
		Self::deposit_event(Event::OverweightServiced(index, used));
		// Report actual weight: execution weight plus the 1_000_000 base accounted for in
		// the `#[pallet::weight]` annotation above, so the unused portion is refunded.
		Ok(Some(used.saturating_add(1_000_000)).into())
	}
}
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
#[pallet::metadata(T::BlockNumber = "BlockNumber")]
// `id` fields below are `MessageId`s, i.e. the blake2-256 hash of the message data.
pub enum Event<T: Config> {
	/// Downward message is invalid XCM.
	/// \[ id \]
	InvalidFormat(MessageId),
	/// Downward message is unsupported version of XCM.
	/// \[ id \]
	UnsupportedVersion(MessageId),
	/// Downward message executed with the given outcome.
	/// \[ id, outcome \]
	ExecutedDownward(MessageId, Outcome),
	/// The weight limit for handling downward messages was reached.
	/// \[ id, remaining, required \]
	WeightExhausted(MessageId, Weight, Weight),
	/// Downward message is overweight and was placed in the overweight queue.
	/// \[ id, index, required \]
	OverweightEnqueued(MessageId, OverweightIndex, Weight),
	/// Downward message from the overweight queue was executed.
	/// \[ index, used \]
	OverweightServiced(OverweightIndex, Weight),
}
impl<T: Config> Pallet<T> {
	/// Service the message queue up to some given weight `limit`.
	///
	/// Returns the weight consumed by executing messages in the queue.
	fn service_queue(limit: Weight) -> Weight {
		PageIndex::<T>::mutate(|page_index| Self::do_service_queue(limit, page_index))
	}

	/// Exactly equivalent to `service_queue` but expects a mutable `page_index` to be passed
	/// in and any changes stored.
	fn do_service_queue(limit: Weight, page_index: &mut PageIndexData) -> Weight {
		let mut used = 0;
		while page_index.begin_used < page_index.end_used {
			// Remove the page up-front; any unprocessed remainder is re-inserted below.
			let page = Pages::<T>::take(page_index.begin_used);
			for (i, &(sent_at, ref data)) in page.iter().enumerate() {
				match Self::try_service_message(limit.saturating_sub(used), sent_at, &data[..]) {
					Ok(w) => used += w,
					Err(..) => {
						// Too much weight needed - put the remaining messages back and bail
						Pages::<T>::insert(page_index.begin_used, &page[i..]);
						return used;
					}
				}
			}
			// The whole page was executed; advance to the next one.
			page_index.begin_used += 1;
		}
		if page_index.begin_used == page_index.end_used {
			// Reset if there's no pages left.
			page_index.begin_used = 0;
			page_index.end_used = 0;
		}
		used
	}

	/// Attempt to service an individual message. Will return `Ok` with the execution weight
	/// consumed unless the message was found to need more weight than `limit`.
	///
	/// NOTE: This will return `Ok` in the case of an error decoding, weighing or executing
	/// the message. This is why it's called message "servicing" rather than "execution".
	pub(crate) fn try_service_message(
		limit: Weight,
		_sent_at: RelayBlockNumber,
		data: &[u8],
	) -> Result<Weight, (MessageId, Weight)> {
		// The message id reported in events is the blake2-256 hash of the raw data.
		let id = sp_io::hashing::blake2_256(&data[..]);
		let maybe_msg = VersionedXcm::<T::Call>::decode(&mut &data[..])
			.map(Xcm::<T::Call>::try_from);
		match maybe_msg {
			Err(_) => {
				// Not decodable as a `VersionedXcm`: report and charge no weight.
				Self::deposit_event(Event::InvalidFormat(id));
				Ok(0)
			},
			Ok(Err(())) => {
				// Decoded, but an XCM version we cannot convert into the one we execute.
				Self::deposit_event(Event::UnsupportedVersion(id));
				Ok(0)
			},
			Ok(Ok(x)) => {
				// Execute with the relay chain (`Parent`) as the asserted origin.
				let outcome = T::XcmExecutor::execute_xcm(Junction::Parent.into(), x, limit);
				match outcome {
					// Only a weight-limit failure propagates as `Err`, carrying the weight
					// that would be required so the caller can queue or park the message.
					Outcome::Error(XcmError::WeightLimitReached(required)) => Err((id, required)),
					outcome => {
						let weight_used = outcome.weight_used();
						Self::deposit_event(Event::ExecutedDownward(id, outcome));
						Ok(weight_used)
					}
				}
			}
		}
	}
}
/// For an incoming downward message, this just adapts an XCM executor and executes DMP messages
/// immediately up until some `MaxWeight` at which point it errors. Their origin is asserted to be
/// the `Parent` location.
impl<T: Config> DmpMessageHandler for Pallet<T> {
	fn handle_dmp_messages(
		iter: impl Iterator<Item=(RelayBlockNumber, Vec<u8>)>,
		limit: Weight,
	) -> Weight {
		let mut page_index = PageIndex::<T>::get();
		// `max_individual` decides whether an unexecutable message is "overweight".
		let config = Configuration::<T>::get();

		// First try to use `max_weight` to service the current queue.
		let mut used = Self::do_service_queue(limit, &mut page_index);

		// Then if the queue is empty, use the weight remaining to service the incoming messages
		// and once we run out of weight, place them in the queue.
		let item_count = iter.size_hint().0;
		let mut maybe_enqueue_page = if page_index.end_used > page_index.begin_used {
			// queue is already non-empty - start a fresh page.
			Some(Vec::with_capacity(item_count))
		} else {
			None
		};

		for (i, (sent_at, data)) in iter.enumerate() {
			if maybe_enqueue_page.is_none() {
				// We're not currently enqueuing - try to execute inline.
				let remaining = limit.saturating_sub(used);
				match Self::try_service_message(remaining, sent_at, &data[..]) {
					Ok(consumed) => used += consumed,
					Err((id, required)) =>
						// Too much weight required right now.
						if required > config.max_individual {
							// overweight - add to overweight queue and continue with
							// message execution.
							let index = page_index.overweight_count;
							Overweight::<T>::insert(index, (sent_at, data));
							Self::deposit_event(Event::OverweightEnqueued(id, index, required));
							page_index.overweight_count += 1;
							// Not needed for control flow, but only to ensure that the compiler
							// understands that we won't attempt to re-use `data` later.
							continue;
						} else {
							// not overweight. stop executing inline and enqueue normally
							// from here on.
							let item_count_left = item_count.saturating_sub(i);
							maybe_enqueue_page = Some(Vec::with_capacity(item_count_left));
							Self::deposit_event(Event::WeightExhausted(id, remaining, required));
						}
				}
			}
			// Cannot be an `else` here since the `maybe_enqueue_page` may have changed.
			if let Some(ref mut enqueue_page) = maybe_enqueue_page {
				enqueue_page.push((sent_at, data));
			}
		}

		// Deposit the enqueued page if any and save the index.
		if let Some(enqueue_page) = maybe_enqueue_page {
			Pages::<T>::insert(page_index.end_used, enqueue_page);
			page_index.end_used += 1;
		}
		PageIndex::<T>::put(page_index);

		used
	}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate as dmp_queue;
use std::cell::RefCell;
use codec::Encode;
use cumulus_primitives_core::ParaId;
use frame_support::{parameter_types, assert_noop, traits::OnIdle};
use sp_core::H256;
use sp_runtime::{testing::Header, traits::{IdentityLookup, BlakeTwo256}};
use sp_runtime::DispatchError::BadOrigin;
use sp_version::RuntimeVersion;
use xcm::v0::{MultiLocation, OriginKind};
type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<Test>;
type Block = frame_system::mocking::MockBlock<Test>;
type Xcm = xcm::v0::Xcm<Call>;

// Minimal mock runtime: just `System` plus the pallet under test.
frame_support::construct_runtime!(
	pub enum Test where
		Block = Block,
		NodeBlock = Block,
		UncheckedExtrinsic = UncheckedExtrinsic,
	{
		System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
		DmpQueue: dmp_queue::{Pallet, Call, Storage, Event<T>},
	}
);

parameter_types! {
	pub const BlockHashCount: u64 = 250;
	pub Version: RuntimeVersion = RuntimeVersion {
		spec_name: sp_version::create_runtime_str!("test"),
		impl_name: sp_version::create_runtime_str!("system-test"),
		authoring_version: 1,
		spec_version: 1,
		impl_version: 1,
		apis: sp_version::create_apis_vec!([]),
		transaction_version: 1,
	};
	pub const ParachainId: ParaId = ParaId::new(200);
	pub const ReservedXcmpWeight: Weight = 0;
	pub const ReservedDmpWeight: Weight = 0;
}

type AccountId = u64;

impl frame_system::Config for Test {
	type Origin = Origin;
	type Call = Call;
	type Index = u64;
	type BlockNumber = u64;
	type Hash = H256;
	type Hashing = BlakeTwo256;
	type AccountId = AccountId;
	type Lookup = IdentityLookup<Self::AccountId>;
	type Header = Header;
	type Event = Event;
	type BlockHashCount = BlockHashCount;
	type BlockLength = ();
	type BlockWeights = ();
	type Version = Version;
	type PalletInfo = PalletInfo;
	type AccountData = ();
	type OnNewAccount = ();
	type OnKilledAccount = ();
	type DbWeight = ();
	type BaseCallFilter = ();
	type SystemWeightInfo = ();
	type SS58Prefix = ();
	type OnSetCode = ();
}

thread_local! {
	// Every message passed to `MockExec` is recorded here together with its outcome.
	pub static TRACE: RefCell<Vec<(Xcm, Outcome)>> = RefCell::new(Vec::new());
}

/// Drain and return the accumulated execution trace.
pub fn take_trace() -> Vec<(Xcm, Outcome)> {
	TRACE.with(|q| {
		let q = &mut *q.borrow_mut();
		let r = q.clone();
		q.clear();
		r
	})
}
/// Mock XCM executor: a `Transact` completes iff its `require_weight_at_most` fits within the
/// weight limit; any other message is reported as unimplemented.
pub struct MockExec;

impl ExecuteXcm<Call> for MockExec {
	type Call = Call;
	fn execute_xcm(_origin: MultiLocation, message: Xcm, weight_limit: Weight) -> Outcome {
		let o = match &message {
			Xcm::Transact { require_weight_at_most, .. } => {
				if *require_weight_at_most <= weight_limit {
					Outcome::Complete(*require_weight_at_most)
				} else {
					Outcome::Error(XcmError::WeightLimitReached(*require_weight_at_most))
				}
			},
			// use 1000 to decide that it's not supported.
			_ => Outcome::Incomplete(1000.min(weight_limit), XcmError::Unimplemented),
		};
		// Record the message and its outcome so tests can assert on execution order.
		TRACE.with(|q| q.borrow_mut().push((message, o.clone())));
		o
	}
}

impl Config for Test {
	type Event = Event;
	type XcmExecutor = MockExec;
	// Only root may service overweight messages in tests.
	type ExecuteOverweightOrigin = frame_system::EnsureRoot<AccountId>;
}
pub(crate) fn new_test_ext() -> sp_io::TestExternalities {
frame_system::GenesisConfig::default().build_storage::<Test>().unwrap().into()
}
fn enqueue(enqueued: &[Xcm]) {
if !enqueued.is_empty() {
let mut index = PageIndex::<Test>::get();
Pages::<Test>::insert(index.end_used, enqueued.iter()
.map(|m| (0, VersionedXcm::<Call>::from(m.clone()).encode()))
.collect::<Vec<_>>()
);
index.end_used += 1;
PageIndex::<Test>::put(index);
}
}
fn handle_messages(incoming: &[Xcm], limit: Weight) -> Weight {
let iter = incoming.iter().map(|m| (0, VersionedXcm::<Call>::from(m.clone()).encode()));
DmpQueue::handle_dmp_messages(iter, limit)
}
fn msg(weight: Weight) -> Xcm {
Xcm::Transact {
origin_type: OriginKind::Native,
require_weight_at_most: weight,
call: vec![].into(),
}
}
fn msg_complete(weight: Weight) -> (Xcm, Outcome) {
(msg(weight), Outcome::Complete(weight))
}
fn msg_limit_reached(weight: Weight) -> (Xcm, Outcome) {
(msg(weight), Outcome::Error(XcmError::WeightLimitReached(weight)))
}
fn pages_queued() -> PageCounter {
PageIndex::<Test>::get().end_used - PageIndex::<Test>::get().begin_used
}
fn queue_is_empty() -> bool {
pages_queued() == 0
}
fn overweights() -> Vec<OverweightIndex> {
(0..PageIndex::<Test>::get().overweight_count)
.filter(|i| Overweight::<Test>::contains_key(i))
.collect::<Vec<_>>()
}
#[test]
fn basic_setup_works() {
	new_test_ext().execute_with(|| {
		// No incoming messages and an empty queue: nothing executes, no weight used.
		let weight_used = handle_messages(&[], 1000);
		assert_eq!(weight_used, 0);
		assert_eq!(take_trace(), vec![]);
		assert!(queue_is_empty());
	});
}

#[test]
fn service_inline_complete_works() {
	new_test_ext().execute_with(|| {
		// Both messages (1000 + 1001 = 2001) fit within the 2500 limit, so they run inline.
		let incoming = vec![ msg(1000), msg(1001) ];
		let weight_used = handle_messages(&incoming, 2500);
		assert_eq!(weight_used, 2001);
		assert_eq!(take_trace(), vec![msg_complete(1000), msg_complete(1001)]);
		assert!(queue_is_empty());
	});
}

#[test]
fn service_enqueued_works() {
	new_test_ext().execute_with(|| {
		// Three pre-queued messages; only the first two fit in 2500 weight, the third
		// hits the limit and stays queued.
		let enqueued = vec![ msg(1000), msg(1001), msg(1002) ];
		enqueue(&enqueued);
		let weight_used = handle_messages(&[], 2500);
		assert_eq!(weight_used, 2001);
		assert_eq!(take_trace(), vec![
			msg_complete(1000),
			msg_complete(1001),
			msg_limit_reached(1002),
		]);
	});
}
#[test]
fn enqueue_works() {
	new_test_ext().execute_with(|| {
		// The 999 limit cannot execute even the first message (needs 1000, not overweight
		// under the default config), so all three get enqueued as one page.
		let incoming = vec![ msg(1000), msg(1001), msg(1002) ];
		let weight_used = handle_messages(&incoming, 999);
		assert_eq!(weight_used, 0);
		assert_eq!(PageIndex::<Test>::get(), PageIndexData { begin_used: 0, end_used: 1, overweight_count: 0});
		assert_eq!(Pages::<Test>::get(0).len(), 3);
		assert_eq!(take_trace(), vec![ msg_limit_reached(1000) ]);

		// 2500 services the first two; the third is put back.
		let weight_used = handle_messages(&[], 2500);
		assert_eq!(weight_used, 2001);
		assert_eq!(take_trace(), vec![
			msg_complete(1000),
			msg_complete(1001),
			msg_limit_reached(1002),
		]);

		// The remaining message drains on the next call.
		let weight_used = handle_messages(&[], 2500);
		assert_eq!(weight_used, 1002);
		assert_eq!(take_trace(), vec![
			msg_complete(1002),
		]);
		assert!(queue_is_empty());
	});
}

#[test]
fn service_inline_then_enqueue_works() {
	new_test_ext().execute_with(|| {
		// 1500 executes the first message inline; the second needs 1001 > 500 remaining,
		// so it and everything after are enqueued.
		let incoming = vec![ msg(1000), msg(1001), msg(1002) ];
		let weight_used = handle_messages(&incoming, 1500);
		assert_eq!(weight_used, 1000);
		assert_eq!(pages_queued(), 1);
		assert_eq!(Pages::<Test>::get(0).len(), 2);
		assert_eq!(take_trace(), vec![
			msg_complete(1000),
			msg_limit_reached(1001),
		]);

		// 2500 drains the queued page (1001 + 1002 = 2003).
		let weight_used = handle_messages(&[], 2500);
		assert_eq!(weight_used, 2003);
		assert_eq!(take_trace(), vec![
			msg_complete(1001),
			msg_complete(1002),
		]);
		assert!(queue_is_empty());
	});
}

#[test]
fn service_enqueued_and_inline_works() {
	new_test_ext().execute_with(|| {
		// Enough weight to process the old queue first, then the incoming ones inline.
		let enqueued = vec![ msg(1000), msg(1001) ];
		let incoming = vec![ msg(1002), msg(1003) ];
		enqueue(&enqueued);
		let weight_used = handle_messages(&incoming, 5000);
		assert_eq!(weight_used, 4006);
		assert_eq!(take_trace(), vec![
			msg_complete(1000),
			msg_complete(1001),
			msg_complete(1002),
			msg_complete(1003),
		]);
		assert!(queue_is_empty());
	});
}
#[test]
fn service_enqueued_partially_and_then_enqueue_works() {
	new_test_ext().execute_with(|| {
		// The 10001 blocker exceeds the remaining weight, so the incoming messages are
		// enqueued behind it as a second page.
		let enqueued = vec![ msg(1000), msg(10001) ];
		let incoming = vec![ msg(1002), msg(1003) ];
		enqueue(&enqueued);
		let weight_used = handle_messages(&incoming, 5000);
		assert_eq!(weight_used, 1000);
		assert_eq!(take_trace(), vec![
			msg_complete(1000),
			msg_limit_reached(10001),
		]);
		assert_eq!(pages_queued(), 2);

		// 5000 is not enough to process the 10001 blocker, so nothing happens.
		let weight_used = handle_messages(&[], 5000);
		assert_eq!(weight_used, 0);
		assert_eq!(take_trace(), vec![
			msg_limit_reached(10001),
		]);

		// 20000 is now enough to process everything.
		let weight_used = handle_messages(&[], 20000);
		assert_eq!(weight_used, 12006);
		assert_eq!(take_trace(), vec![
			msg_complete(10001),
			msg_complete(1002),
			msg_complete(1003),
		]);
		assert!(queue_is_empty());
	});
}

#[test]
fn service_enqueued_completely_and_then_enqueue_works() {
	new_test_ext().execute_with(|| {
		// The queue drains fully, then the first incoming message is too heavy and
		// triggers enqueuing of the rest.
		let enqueued = vec![ msg(1000), msg(1001) ];
		let incoming = vec![ msg(10002), msg(1003) ];
		enqueue(&enqueued);
		let weight_used = handle_messages(&incoming, 5000);
		assert_eq!(weight_used, 2001);
		assert_eq!(take_trace(), vec![
			msg_complete(1000),
			msg_complete(1001),
			msg_limit_reached(10002),
		]);
		assert_eq!(pages_queued(), 1);

		// 20000 is now enough to process everything.
		let weight_used = handle_messages(&[], 20000);
		assert_eq!(weight_used, 11005);
		assert_eq!(take_trace(), vec![
			msg_complete(10002),
			msg_complete(1003),
		]);
		assert!(queue_is_empty());
	});
}

#[test]
fn service_enqueued_then_inline_then_enqueue_works() {
	new_test_ext().execute_with(|| {
		// Queue drains, one incoming runs inline, then the heavy 10003 flips into
		// enqueue mode for the remainder.
		let enqueued = vec![ msg(1000), msg(1001) ];
		let incoming = vec![ msg(1002), msg(10003) ];
		enqueue(&enqueued);
		let weight_used = handle_messages(&incoming, 5000);
		assert_eq!(weight_used, 3003);
		assert_eq!(take_trace(), vec![
			msg_complete(1000),
			msg_complete(1001),
			msg_complete(1002),
			msg_limit_reached(10003),
		]);
		assert_eq!(pages_queued(), 1);

		// 20000 is now enough to process everything.
		let weight_used = handle_messages(&[], 20000);
		assert_eq!(weight_used, 10003);
		assert_eq!(take_trace(), vec![
			msg_complete(10003),
		]);
		assert!(queue_is_empty());
	});
}
#[test]
fn page_crawling_works() {
	new_test_ext().execute_with(|| {
		// Each call finishes exactly one old page and enqueues one new message, so
		// `begin_used` advances by one per call while the queue stays two pages long.
		let enqueued = vec![ msg(1000), msg(1001) ];
		enqueue(&enqueued);
		let weight_used = handle_messages(&vec![ msg(1002) ], 1500);
		assert_eq!(weight_used, 1000);
		assert_eq!(take_trace(), vec![
			msg_complete(1000),
			msg_limit_reached(1001),
		]);
		assert_eq!(pages_queued(), 2);
		assert_eq!(PageIndex::<Test>::get().begin_used, 0);

		let weight_used = handle_messages(&vec![ msg(1003) ], 1500);
		assert_eq!(weight_used, 1001);
		assert_eq!(take_trace(), vec![
			msg_complete(1001),
			msg_limit_reached(1002),
		]);
		assert_eq!(pages_queued(), 2);
		assert_eq!(PageIndex::<Test>::get().begin_used, 1);

		let weight_used = handle_messages(&vec![ msg(1004) ], 1500);
		assert_eq!(weight_used, 1002);
		assert_eq!(take_trace(), vec![
			msg_complete(1002),
			msg_limit_reached(1003),
		]);
		assert_eq!(pages_queued(), 2);
		assert_eq!(PageIndex::<Test>::get().begin_used, 2);
	});
}
#[test]
fn overweight_should_not_block_queue() {
	new_test_ext().execute_with(|| {
		// Set the overweight threshold to 9999.
		Configuration::<Test>::put(ConfigData { max_individual: 9999 });

		// 10001 > 9999 goes to the overweight queue; the messages around it still
		// execute inline, so the queue is not blocked.
		let incoming = vec![ msg(1000), msg(10001), msg(1002) ];
		let weight_used = handle_messages(&incoming, 2500);
		assert_eq!(weight_used, 2002);
		assert!(queue_is_empty());
		assert_eq!(take_trace(), vec![
			msg_complete(1000),
			msg_limit_reached(10001),
			msg_complete(1002),
		]);

		assert_eq!(overweights(), vec![0]);
	});
}

#[test]
fn overweights_should_be_manually_executable() {
	new_test_ext().execute_with(|| {
		// Set the overweight threshold to 9999.
		Configuration::<Test>::put(ConfigData { max_individual: 9999 });

		let incoming = vec![ msg(10000) ];
		let weight_used = handle_messages(&incoming, 2500);
		assert_eq!(weight_used, 0);
		assert_eq!(take_trace(), vec![ msg_limit_reached(10000) ]);
		assert_eq!(overweights(), vec![0]);

		// Wrong origin, unknown index, and insufficient weight limit must all fail
		// without executing anything.
		assert_noop!(DmpQueue::service_overweight(Origin::signed(1), 0, 20000), BadOrigin);
		assert_noop!(DmpQueue::service_overweight(Origin::root(), 1, 20000), Error::<Test>::Unknown);
		assert_noop!(DmpQueue::service_overweight(Origin::root(), 0, 9999), Error::<Test>::OverLimit);
		assert_eq!(take_trace(), vec![ msg_limit_reached(10000) ]);

		let base_weight = super::Call::<Test>::service_overweight(0, 0).get_dispatch_info().weight;
		// NOTE: `use` is an item, so it is in scope for the whole block even though it
		// appears after the `get_dispatch_info` call above.
		use frame_support::weights::GetDispatchInfo;
		// Success: actual weight is the base (1_000_000) plus the 10000 execution weight.
		let info = DmpQueue::service_overweight(Origin::root(), 0, 20000).unwrap();
		let actual_weight = info.actual_weight.unwrap();
		assert_eq!(actual_weight, base_weight + 10000);
		assert_eq!(take_trace(), vec![ msg_complete(10000) ]);
		assert!(overweights().is_empty());

		// Servicing twice must fail: the message was removed on success.
		assert_noop!(DmpQueue::service_overweight(Origin::root(), 0, 20000), Error::<Test>::Unknown);
	});
}

#[test]
fn on_idle_should_service_queue() {
	new_test_ext().execute_with(|| {
		// Three pages; 6000 covers 1000..=1004 (5010 total) but not 1005.
		enqueue(&vec![ msg(1000), msg(1001) ]);
		enqueue(&vec![ msg(1002), msg(1003) ]);
		enqueue(&vec![ msg(1004), msg(1005) ]);

		let weight_used = DmpQueue::on_idle(1, 6000);
		assert_eq!(weight_used, 5010);
		assert_eq!(take_trace(), vec![
			msg_complete(1000),
			msg_complete(1001),
			msg_complete(1002),
			msg_complete(1003),
			msg_complete(1004),
			msg_limit_reached(1005),
		]);
		assert_eq!(pages_queued(), 1);
	});
}
}
+54 -50
View File
@@ -42,7 +42,7 @@ use polkadot_parachain::primitives::RelayChainBlockNumber;
use cumulus_primitives_core::{
relay_chain,
well_known_keys::{self, NEW_VALIDATION_CODE},
AbridgedHostConfiguration, DownwardMessageHandler, XcmpMessageHandler,
AbridgedHostConfiguration, DmpMessageHandler, XcmpMessageHandler,
InboundDownwardMessage, InboundHrmpMessage, OnValidationData, OutboundHrmpMessage, ParaId,
PersistedValidationData, UpwardMessage, UpwardMessageSender, MessageSendError,
XcmpMessageSource, ChannelStatus, GetChannelInfo,
@@ -70,13 +70,16 @@ pub trait Config: frame_system::Config<OnSetCode = ParachainSetCode<Self>> {
/// Returns the parachain ID we are running with.
type SelfParaId: Get<ParaId>;
/// The downward message handlers that will be informed when a message is received.
type DownwardMessageHandlers: DownwardMessageHandler;
/// The place where outbound XCMP messages come from. This is queried in `finalize_block`.
type OutboundXcmpMessageSource: XcmpMessageSource;
/// The HRMP message handlers that will be informed when a message is received.
/// The message handler that will be invoked when messages are received via DMP.
type DmpMessageHandler: DmpMessageHandler;
/// The weight we reserve at the beginning of the block for processing DMP messages.
type ReservedDmpWeight: Get<Weight>;
/// The message handler that will be invoked when messages are received via XCMP.
///
/// The messages are dispatched in the order they were relayed by the relay chain. If multiple
/// messages were relayed at one block, these will be dispatched in ascending order of the
@@ -147,6 +150,10 @@ decl_storage! {
/// overrides the amount set in the Config trait.
ReservedXcmpWeightOverride: Option<Weight>;
/// The weight we reserve at the beginning of the block for processing DMP messages. This
/// overrides the amount set in the Config trait.
ReservedDmpWeightOverride: Option<Weight>;
/// The next authorized upgrade, if there is one.
AuthorizedUpgrade: Option<T::Hash>;
}
@@ -353,7 +360,7 @@ decl_module! {
storage::unhashed::put(well_known_keys::HRMP_OUTBOUND_MESSAGES, &outbound_messages);
}
fn on_initialize(n: T::BlockNumber) -> Weight {
fn on_initialize(_n: T::BlockNumber) -> Weight {
// To prevent removing `NEW_VALIDATION_CODE` that was set by another `on_initialize` like
// for example from scheduler, we only kill the storage entry if it was not yet updated
// in the current block.
@@ -430,6 +437,9 @@ impl<T: Config> sp_runtime::traits::ValidateUnsigned for Module<T> {
})
}
}
if let Call::set_validation_data(..) = call {
return Ok(Default::default())
}
Err(InvalidTransaction::Call.into())
}
}
@@ -520,41 +530,28 @@ impl<T: Config> Module<T> {
downward_messages: Vec<InboundDownwardMessage>,
) -> Weight {
let dm_count = downward_messages.len() as u32;
let mut dmq_head = LastDmqMqcHead::get();
let mut weight_used = 0;
if dm_count != 0 {
let mut processed_count = 0;
Self::deposit_event(RawEvent::DownwardMessagesReceived(dm_count));
let max_weight = ReservedDmpWeightOverride::get().unwrap_or_else(T::ReservedDmpWeight::get);
// Reference fu to avoid the `move` capture.
let weight_used_ref = &mut weight_used;
let processed_count_ref = &mut processed_count;
let result_mqc_head = LastDmqMqcHead::mutate(move |mqc| {
for downward_message in downward_messages {
mqc.extend_downward(&downward_message);
*weight_used_ref += T::DownwardMessageHandlers::handle_downward_message(downward_message);
*processed_count_ref += 1;
}
mqc.0
});
let message_iter = downward_messages.into_iter()
.inspect(|m| { dmq_head.extend_downward(m); })
.map(|m| (m.sent_at, m.msg));
weight_used += T::DmpMessageHandler::handle_dmp_messages(message_iter, max_weight);
LastDmqMqcHead::put(&dmq_head);
Self::deposit_event(RawEvent::DownwardMessagesProcessed(
processed_count,
weight_used,
result_mqc_head.clone(),
expected_dmq_mqc_head.clone(),
));
Self::deposit_event(RawEvent::DownwardMessagesProcessed(weight_used, dmq_head.0));
};
// After hashing each message in the message queue chain submitted by the collator, we should
// arrive to the MQC head provided by the relay chain.
//
// A mismatch means that at least some of the submitted messages were altered, omitted or added
// improperly.
assert_eq!(result_mqc_head, expected_dmq_mqc_head);
} else {
assert_eq!(LastDmqMqcHead::get().0, expected_dmq_mqc_head);
}
// After hashing each message in the message queue chain submitted by the collator, we should
// arrive to the MQC head provided by the relay chain.
//
// A mismatch means that at least some of the submitted messages were altered, omitted or added
// improperly.
assert_eq!(dmq_head.0, expected_dmq_mqc_head);
// Store the processed_downward_messages here so that it will be accessible from
// PVF's `validate_block` wrapper and collation pipeline.
@@ -854,12 +851,12 @@ decl_event! {
ValidationFunctionApplied(RelayChainBlockNumber),
/// An upgrade has been authorized.
UpgradeAuthorized(Hash),
/// Downward messages were processed using the given weight.
/// \[ count, weight_used, result_mqc_head, expected_mqc_head \]
DownwardMessagesProcessed(u32, Weight, relay_chain::Hash, relay_chain::Hash),
/// Some downward messages have been received and will be processed.
/// \[ count \]
DownwardMessagesReceived(u32),
/// Downward messages were processed using the given weight.
/// \[ weight_used, result_mqc_head \]
DownwardMessagesProcessed(Weight, relay_chain::Hash),
}
}
@@ -938,6 +935,7 @@ mod tests {
};
pub const ParachainId: ParaId = ParaId::new(200);
pub const ReservedXcmpWeight: Weight = 0;
pub const ReservedDmpWeight: Weight = 0;
}
impl frame_system::Config for Test {
type Origin = Origin;
@@ -968,9 +966,10 @@ mod tests {
type Event = Event;
type OnValidationData = ();
type SelfParaId = ParachainId;
type DownwardMessageHandlers = SaveIntoThreadLocal;
type XcmpMessageHandler = SaveIntoThreadLocal;
type OutboundXcmpMessageSource = FromThreadLocal;
type DmpMessageHandler = SaveIntoThreadLocal;
type ReservedDmpWeight = ReservedDmpWeight;
type XcmpMessageHandler = SaveIntoThreadLocal;
type ReservedXcmpWeight = ReservedXcmpWeight;
}
@@ -978,7 +977,7 @@ mod tests {
pub struct SaveIntoThreadLocal;
std::thread_local! {
static HANDLED_DOWNWARD_MESSAGES: RefCell<Vec<InboundDownwardMessage>> = RefCell::new(Vec::new());
static HANDLED_DMP_MESSAGES: RefCell<Vec<(relay_chain::BlockNumber, Vec<u8>)>> = RefCell::new(Vec::new());
static HANDLED_XCMP_MESSAGES: RefCell<Vec<(ParaId, relay_chain::BlockNumber, Vec<u8>)>> = RefCell::new(Vec::new());
static SENT_MESSAGES: RefCell<Vec<(ParaId, Vec<u8>)>> = RefCell::new(Vec::new());
}
@@ -1013,12 +1012,17 @@ mod tests {
}
}
impl DownwardMessageHandler for SaveIntoThreadLocal {
fn handle_downward_message(msg: InboundDownwardMessage) -> Weight {
HANDLED_DOWNWARD_MESSAGES.with(|m| {
m.borrow_mut().push(msg);
});
0
impl DmpMessageHandler for SaveIntoThreadLocal {
fn handle_dmp_messages(
iter: impl Iterator<Item=(RelayBlockNumber, Vec<u8>)>,
_max_weight: Weight,
) -> Weight {
HANDLED_DMP_MESSAGES.with(|m| {
for i in iter {
m.borrow_mut().push(i);
}
0
})
}
}
@@ -1039,7 +1043,7 @@ mod tests {
// This function basically just builds a genesis storage key/value store according to
// our desired mockup.
fn new_test_ext() -> sp_io::TestExternalities {
HANDLED_DOWNWARD_MESSAGES.with(|m| m.borrow_mut().clear());
HANDLED_DMP_MESSAGES.with(|m| m.borrow_mut().clear());
HANDLED_XCMP_MESSAGES.with(|m| m.borrow_mut().clear());
frame_system::GenesisConfig::default()
@@ -1606,9 +1610,9 @@ mod tests {
_ => unreachable!(),
})
.add(1, || {
HANDLED_DOWNWARD_MESSAGES.with(|m| {
HANDLED_DMP_MESSAGES.with(|m| {
let mut m = m.borrow_mut();
assert_eq!(&*m, &[MSG.clone()]);
assert_eq!(&*m, &[(MSG.sent_at, MSG.msg.clone())]);
m.clear();
});
});
@@ -20,7 +20,7 @@ use frame_support::traits::ExecuteBlock;
use sp_runtime::traits::{Block as BlockT, HashFor, Header as HeaderT, NumberFor};
use sp_io::KillChildStorageResult;
use sp_std::{boxed::Box, vec::Vec};
use sp_std::prelude::*;
use hash_db::{HashDB, EMPTY_PREFIX};
@@ -35,13 +35,10 @@ use cumulus_primitives_core::{
HRMP_OUTBOUND_MESSAGES, HRMP_WATERMARK, NEW_VALIDATION_CODE, PROCESSED_DOWNWARD_MESSAGES,
UPWARD_MESSAGES,
},
OutboundHrmpMessage, PersistedValidationData, UpwardMessage,
OutboundHrmpMessage, UpwardMessage,
};
use sp_core::storage::{ChildInfo, TrackedStorageKey};
use sp_externalities::{
set_and_run_with_externalities, Error, Extension, ExtensionStore, Externalities,
};
use sp_std::any::{Any, TypeId};
use sp_core::storage::ChildInfo;
use sp_externalities::{set_and_run_with_externalities, Externalities};
use sp_trie::MemoryDB;
type Ext<'a, B> = sp_state_machine::Ext<
+59 -26
View File
@@ -20,12 +20,13 @@
#![cfg_attr(not(feature = "std"), no_std)]
use sp_std::convert::TryFrom;
use cumulus_primitives_core::{ParaId, DownwardMessageHandler, InboundDownwardMessage};
use sp_std::{prelude::*, convert::TryFrom};
use cumulus_primitives_core::{ParaId, DmpMessageHandler};
use cumulus_primitives_core::relay_chain::BlockNumber as RelayBlockNumber;
use codec::{Encode, Decode};
use sp_runtime::traits::BadOrigin;
use xcm::{VersionedXcm, v0::{Xcm, Junction, Outcome, ExecuteXcm}};
use frame_support::{traits::Get, dispatch::Weight};
use frame_support::dispatch::Weight;
pub use pallet::*;
#[frame_support::pallet]
@@ -45,9 +46,6 @@ pub mod pallet {
type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>;
type XcmExecutor: ExecuteXcm<Self::Call>;
#[pallet::constant]
type MaxWeight: Get<Weight>;
}
#[pallet::error]
@@ -74,33 +72,68 @@ pub mod pallet {
/// \[ id, outcome \]
ExecutedDownward([u8; 8], Outcome),
}
}
/// For an incoming downward message, this just adapts an XCM executor and executes DMP messages
/// immediately up until some `MaxWeight` at which point it errors. Their origin is asserted to be
/// the Parent location.
impl<T: Config> DownwardMessageHandler for Pallet<T> {
fn handle_downward_message(msg: InboundDownwardMessage) -> Weight {
let id = sp_io::hashing::twox_64(&msg.msg[..]);
let msg = VersionedXcm::<T::Call>::decode(&mut &msg.msg[..])
/// For an incoming downward message, this just adapts an XCM executor and executes DMP messages
/// immediately. Their origin is asserted to be the Parent location.
///
/// The weight `limit` is only respected as the maximum for an individual message.
///
/// Because this largely ignores the given weight limit, it probably isn't good for most production
/// uses. Use DmpQueue pallet for a more robust design.
pub struct UnlimitedDmpExecution<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> DmpMessageHandler for UnlimitedDmpExecution<T> {
fn handle_dmp_messages(
iter: impl Iterator<Item=(RelayBlockNumber, Vec<u8>)>,
limit: Weight,
) -> Weight {
let mut used = 0;
for (_sent_at, data) in iter {
let id = sp_io::hashing::twox_64(&data[..]);
let msg = VersionedXcm::<T::Call>::decode(&mut &data[..])
.map(Xcm::<T::Call>::try_from);
match msg {
Err(_) => Pallet::<T>::deposit_event(Event::InvalidFormat(id)),
Ok(Err(())) => Pallet::<T>::deposit_event(Event::UnsupportedVersion(id)),
Ok(Ok(x)) => {
let weight_limit = T::MaxWeight::get();
let outcome = T::XcmExecutor::execute_xcm(Junction::Parent.into(), x, weight_limit);
let weight_used = outcome.weight_used();
Self::deposit_event(Event::ExecutedDownward(id, outcome));
weight_used
let outcome = T::XcmExecutor::execute_xcm(Junction::Parent.into(), x, limit);
used += outcome.weight_used();
Pallet::<T>::deposit_event(Event::ExecutedDownward(id, outcome));
}
Ok(Err(())) => {
Self::deposit_event(Event::UnsupportedVersion(id));
0
},
Err(_) => {
Self::deposit_event(Event::InvalidFormat(id));
0
},
}
}
used
}
}
/// For an incoming downward message, this just adapts an XCM executor and executes DMP messages
/// immediately. Their origin is asserted to be the Parent location.
///
/// This respects the given weight limit and silently drops messages if they would break it. It
/// probably isn't good for most production uses. Use DmpQueue pallet for a more robust design.
pub struct LimitAndDropDmpExecution<T>(sp_std::marker::PhantomData<T>);
impl<T: Config> DmpMessageHandler for LimitAndDropDmpExecution<T> {
fn handle_dmp_messages(
iter: impl Iterator<Item=(RelayBlockNumber, Vec<u8>)>,
limit: Weight,
) -> Weight {
let mut used = 0;
for (_sent_at, data) in iter {
let id = sp_io::hashing::twox_64(&data[..]);
let msg = VersionedXcm::<T::Call>::decode(&mut &data[..])
.map(Xcm::<T::Call>::try_from);
match msg {
Err(_) => Pallet::<T>::deposit_event(Event::InvalidFormat(id)),
Ok(Err(())) => Pallet::<T>::deposit_event(Event::UnsupportedVersion(id)),
Ok(Ok(x)) => {
let weight_limit = limit.saturating_sub(used);
let outcome = T::XcmExecutor::execute_xcm(Junction::Parent.into(), x, weight_limit);
used += outcome.weight_used();
Pallet::<T>::deposit_event(Event::ExecutedDownward(id, outcome));
}
}
}
used
}
}
+20 -6
View File
@@ -118,12 +118,23 @@ pub mod well_known_keys {
}
/// Something that should be called when a downward message is received.
pub trait DownwardMessageHandler {
/// Handle the given downward message.
fn handle_downward_message(msg: InboundDownwardMessage) -> Weight;
pub trait DmpMessageHandler {
/// Handle some incoming DMP messages (note these are individual XCM messages).
///
/// Also, process messages up to some `max_weight`.
fn handle_dmp_messages(
iter: impl Iterator<Item=(RelayBlockNumber, Vec<u8>)>,
max_weight: Weight,
) -> Weight;
}
impl DownwardMessageHandler for () {
fn handle_downward_message(_msg: InboundDownwardMessage) -> Weight { 0 }
impl DmpMessageHandler for () {
fn handle_dmp_messages(
iter: impl Iterator<Item=(RelayBlockNumber, Vec<u8>)>,
_max_weight: Weight,
) -> Weight {
for _ in iter {}
0
}
}
/// Something that should be called for each batch of messages received over XCMP.
@@ -141,7 +152,10 @@ impl XcmpMessageHandler for () {
fn handle_xcmp_messages<'a, I: Iterator<Item=(ParaId, RelayBlockNumber, &'a [u8])>>(
iter: I,
_max_weight: Weight,
) -> Weight { for _ in iter {} 0 }
) -> Weight {
for _ in iter {}
0
}
}
/// Something that should be called when sending an upward message.
+4 -29
View File
@@ -19,11 +19,10 @@
#![cfg_attr(not(feature = "std"), no_std)]
use sp_std::{marker::PhantomData, convert::TryFrom};
use codec::{Encode, Decode};
use cumulus_primitives_core::{UpwardMessageSender, DownwardMessageHandler, InboundDownwardMessage};
use xcm::{VersionedXcm, v0::{Xcm, MultiLocation, Junction, SendXcm, Error as XcmError, ExecuteXcm}};
use frame_support::{traits::Get, dispatch::Weight};
use sp_std::marker::PhantomData;
use codec::Encode;
use cumulus_primitives_core::UpwardMessageSender;
use xcm::{VersionedXcm, v0::{Xcm, MultiLocation, Junction, SendXcm, Error as XcmError}};
/// Xcm router which recognises the `Parent` destination and handles it by sending the message into
/// the given UMP `UpwardMessageSender` implementation. Thus this essentially adapts an
@@ -51,27 +50,3 @@ impl<T: UpwardMessageSender> SendXcm for ParentAsUmp<T> {
}
}
/// For an incoming downward message, this just adapts an XCM executor and executes DMP messages
/// immediately up until some `MaxWeight` at which point it errors. Their origin is asserted to be
/// the Parent location.
pub struct UnqueuedDmpAsParent<MaxWeight, XcmExecutor, Call>(
PhantomData<(MaxWeight, XcmExecutor, Call)>
);
impl<
MaxWeight: Get<Weight>,
XcmExecutor: ExecuteXcm<Call>,
Call,
> DownwardMessageHandler for UnqueuedDmpAsParent<MaxWeight, XcmExecutor, Call> {
fn handle_downward_message(msg: InboundDownwardMessage) -> Weight {
let msg = VersionedXcm::<Call>::decode(&mut &msg.msg[..])
.map(Xcm::<Call>::try_from);
match msg {
Ok(Ok(x)) => {
let weight_limit = MaxWeight::get();
XcmExecutor::execute_xcm(Junction::Parent.into(), x, weight_limit).weight_used()
}
Ok(Err(..)) => 0,
Err(..) => 0,
}
}
}
@@ -38,6 +38,7 @@ pallet-transaction-payment = { git = "https://github.com/paritytech/substrate",
cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false }
cumulus-primitives-core = { path = "../../primitives/core", default-features = false }
cumulus-primitives-utility = { path = "../../primitives/utility", default-features = false }
cumulus-pallet-dmp-queue = { path = "../../pallets/dmp-queue", default-features = false }
cumulus-pallet-xcmp-queue = { path = "../../pallets/xcmp-queue", default-features = false }
cumulus-pallet-xcm = { path = "../../pallets/xcm", default-features = false }
cumulus-ping = { path = "../../rococo-parachains/pallets/ping", default-features = false }
@@ -84,6 +85,7 @@ std = [
"pallet-transaction-payment/std",
"parachain-info/std",
"rococo-parachain-primitives/std",
"cumulus-pallet-dmp-queue/std",
"cumulus-pallet-parachain-system/std",
"cumulus-pallet-xcmp-queue/std",
"cumulus-pallet-xcm/std",
+33 -10
View File
@@ -39,7 +39,7 @@ use sp_version::RuntimeVersion;
// A few exports that help ease life for downstream crates.
pub use frame_support::{
construct_runtime, parameter_types, match_type,
traits::{Randomness, IsInVec, All},
traits::{Randomness, IsInVec},
weights::{
constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND},
DispatchClass, IdentityFee, Weight,
@@ -65,6 +65,8 @@ use xcm_builder::{
};
use xcm_executor::{Config, XcmExecutor};
use pallet_xcm::{XcmPassthrough, EnsureXcm, IsMajorityOfBody};
use xcm::v0::Xcm;
use frame_support::traits::Contains;
pub type SessionHandlers = ();
@@ -77,7 +79,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
spec_name: create_runtime_str!("test-parachain"),
impl_name: create_runtime_str!("test-parachain"),
authoring_version: 1,
spec_version: 9,
spec_version: 11,
impl_version: 0,
apis: RUNTIME_API_VERSIONS,
transaction_version: 1,
@@ -230,14 +232,16 @@ impl pallet_sudo::Config for Runtime {
parameter_types! {
pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT / 4;
pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT / 4;
}
impl cumulus_pallet_parachain_system::Config for Runtime {
type Event = Event;
type OnValidationData = ();
type SelfParaId = parachain_info::Module<Runtime>;
type DownwardMessageHandlers = CumulusXcm;
type OutboundXcmpMessageSource = XcmpQueue;
type DmpMessageHandler = DmpQueue;
type ReservedDmpWeight = ReservedDmpWeight;
type XcmpMessageHandler = XcmpQueue;
type ReservedXcmpWeight = ReservedXcmpWeight;
}
@@ -335,10 +339,6 @@ impl Config for XcmConfig {
type ResponseHandler = (); // Don't handle responses for now.
}
parameter_types! {
pub const MaxDownwardMessageWeight: Weight = MAXIMUM_BLOCK_WEIGHT / 10;
}
/// No local origins on this chain are allowed to dispatch XCM sends/executions.
pub type LocalOriginToLocation = ();
@@ -351,19 +351,25 @@ pub type XcmRouter = (
XcmpQueue,
);
// TODO: Remove to frame_support::traits::All once substrate/8691 merged and bumped
/// A `Contains` implementation which always returns `true`.
pub struct All<T>(sp_std::marker::PhantomData<T>);
impl<T> Contains<T> for All<T> {
fn contains(_: &T) -> bool { true }
}
impl pallet_xcm::Config for Runtime {
type Event = Event;
type SendXcmOrigin = EnsureXcmOrigin<Origin, LocalOriginToLocation>;
type XcmRouter = XcmRouter;
type ExecuteXcmOrigin = EnsureXcmOrigin<Origin, LocalOriginToLocation>;
type XcmExecuteFilter = All<(MultiLocation, xcm::v0::Xcm<Call>)>;
type XcmExecuteFilter = All<(MultiLocation, Xcm<Call>)>;
type XcmExecutor = XcmExecutor<XcmConfig>;
}
impl cumulus_pallet_xcm::Config for Runtime {
type Event = Event;
type XcmExecutor = XcmExecutor<XcmConfig>;
type MaxWeight = MaxDownwardMessageWeight;
}
impl cumulus_pallet_xcmp_queue::Config for Runtime {
@@ -372,6 +378,12 @@ impl cumulus_pallet_xcmp_queue::Config for Runtime {
type ChannelInfo = ParachainSystem;
}
impl cumulus_pallet_dmp_queue::Config for Runtime {
type Event = Event;
type XcmExecutor = XcmExecutor<XcmConfig>;
type ExecuteOverweightOrigin = frame_system::EnsureRoot<AccountId>;
}
impl cumulus_ping::Config for Runtime {
type Event = Event;
type Origin = Origin;
@@ -407,6 +419,16 @@ impl pallet_assets::Config for Runtime {
type WeightInfo = pallet_assets::weights::SubstrateWeight<Runtime>;
}
#[test]
fn encode_call() {
let hash = hex_literal::hex!["0af9fef6f950ca3ac8ac4766200454b1039ffb7b2d0827fffd5e47bd43761437"].into();
let call = Call::ParachainSystem(cumulus_pallet_parachain_system::Call::authorize_upgrade(hash));
assert_eq!(
hex::encode(codec::Encode::encode(&call)),
"14030af9fef6f950ca3ac8ac4766200454b1039ffb7b2d0827fffd5e47bd43761437",
);
}
construct_runtime! {
pub enum Runtime where
Block = Block,
@@ -419,7 +441,7 @@ construct_runtime! {
RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Call, Storage},
TransactionPayment: pallet_transaction_payment::{Pallet, Storage},
ParachainSystem: cumulus_pallet_parachain_system::{Pallet, Call, Storage, Inherent, Event<T>} = 20,
ParachainSystem: cumulus_pallet_parachain_system::{Pallet, Call, Storage, Inherent, Event<T>, ValidateUnsigned} = 20,
ParachainInfo: parachain_info::{Pallet, Storage, Config} = 21,
Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>} = 30,
@@ -429,6 +451,7 @@ construct_runtime! {
XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event<T>} = 50,
PolkadotXcm: pallet_xcm::{Pallet, Call, Event<T>, Origin} = 51,
CumulusXcm: cumulus_pallet_xcm::{Pallet, Call, Event<T>, Origin} = 52,
DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event<T>} = 53,
Spambot: cumulus_ping::{Pallet, Call, Storage, Event<T>} = 99,
}
@@ -32,7 +32,7 @@ frame-system = { git = "https://github.com/paritytech/substrate", default-featur
cumulus-pallet-parachain-system = { path = "../../pallets/parachain-system", default-features = false }
cumulus-primitives-core = { path = "../../primitives/core", default-features = false }
cumulus-primitives-utility = { path = "../../primitives/utility", default-features = false }
cumulus-pallet-xcmp-queue = { path = "../../pallets/xcmp-queue", default-features = false }
cumulus-pallet-dmp-queue = { path = "../../pallets/dmp-queue", default-features = false }
cumulus-pallet-xcm = { path = "../../pallets/xcm", default-features = false }
# Polkadot dependencies
@@ -71,7 +71,7 @@ std = [
"parachain-info/std",
"rococo-parachain-primitives/std",
"cumulus-pallet-parachain-system/std",
"cumulus-pallet-xcmp-queue/std",
"cumulus-pallet-dmp-queue/std",
"cumulus-pallet-xcm/std",
"cumulus-primitives-core/std",
"cumulus-primitives-utility/std",
@@ -155,15 +155,16 @@ impl frame_system::Config for Runtime {
parameter_types! {
// We do anything the parent chain tells us in this runtime.
pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT;
pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT / 2;
}
impl cumulus_pallet_parachain_system::Config for Runtime {
type Event = Event;
type OnValidationData = ();
type SelfParaId = parachain_info::Module<Runtime>;
type DownwardMessageHandlers = CumulusXcm;
type OutboundXcmpMessageSource = ();
type DmpMessageHandler = cumulus_pallet_xcm::UnlimitedDmpExecution<Runtime>;
type ReservedDmpWeight = ReservedDmpWeight;
type XcmpMessageHandler = ();
type ReservedXcmpWeight = ();
}
@@ -198,16 +199,6 @@ parameter_types! {
pub UnitWeightCost: Weight = 1_000_000;
}
pub struct NoTrader;
impl xcm_executor::traits::WeightTrader for NoTrader {
fn new() -> Self { NoTrader }
fn buy_weight(&mut self, _: Weight, _: xcm_executor::Assets)
-> Result<xcm_executor::Assets, xcm::v0::Error>
{
Err(xcm::v0::Error::Unimplemented)
}
}
pub struct XcmConfig;
impl Config for XcmConfig {
type Call = Call;
@@ -219,18 +210,23 @@ impl Config for XcmConfig {
type LocationInverter = LocationInverter<Ancestry>;
type Barrier = AllowUnpaidExecutionFrom<JustTheParent>;
type Weigher = FixedWeightBounds<UnitWeightCost, Call>; // balances not supported
type Trader = NoTrader; // balances not supported
type Trader = (); // balances not supported
type ResponseHandler = (); // Don't handle responses for now.
}
parameter_types! {
pub const MaxDownwardMessageWeight: Weight = MAXIMUM_BLOCK_WEIGHT / 2;
}
impl cumulus_pallet_xcm::Config for Runtime {
type Event = Event;
type XcmExecutor = XcmExecutor<XcmConfig>;
type MaxWeight = MaxDownwardMessageWeight;
}
#[test]
fn encode_call() {
let hash = hex_literal::hex!["0af9fef6f950ca3ac8ac4766200454b1039ffb7b2d0827fffd5e47bd43761437"].into();
let call = Call::ParachainSystem(cumulus_pallet_parachain_system::Call::authorize_upgrade(hash));
assert_eq!(
hex::encode(codec::Encode::encode(&call)),
"01030af9fef6f950ca3ac8ac4766200454b1039ffb7b2d0827fffd5e47bd43761437",
);
}
construct_runtime! {
+2 -1
View File
@@ -218,8 +218,9 @@ impl cumulus_pallet_parachain_system::Config for Runtime {
type SelfParaId = ParachainId;
type Event = Event;
type OnValidationData = ();
type DownwardMessageHandlers = ();
type OutboundXcmpMessageSource = ();
type DmpMessageHandler = ();
type ReservedDmpWeight = ();
type XcmpMessageHandler = ();
type ReservedXcmpWeight = ();
}