feat: initialize Kurdistan SDK - independent fork of Polkadot SDK

This commit is contained in:
2025-12-13 15:44:15 +03:00
commit e4778b4576
6838 changed files with 1847450 additions and 0 deletions
+68
View File
@@ -0,0 +1,68 @@
# Crate manifest for the FRAME message-queue pallet.
[package]
authors.workspace = true
edition.workspace = true
name = "pallet-message-queue"
version = "31.0.0"
license = "Apache-2.0"
homepage.workspace = true
repository.workspace = true
description = "FRAME pallet to queue and process messages"
# Inherit the workspace-wide lint configuration.
[lints]
workspace = true
[dependencies]
codec = { features = ["derive"], workspace = true }
environmental = { workspace = true }
log = { workspace = true }
scale-info = { features = ["derive"], workspace = true }
# `serde` is optional and only pulled in via the `std` feature of dependents.
serde = { optional = true, features = [
"derive",
], workspace = true, default-features = true }
sp-arithmetic = { workspace = true }
sp-core = { workspace = true }
sp-io = { workspace = true }
sp-runtime = { workspace = true }
sp-weights = { workspace = true }
# Only needed when compiling with `runtime-benchmarks`.
frame-benchmarking = { optional = true, workspace = true }
frame-support = { workspace = true }
frame-system = { workspace = true }
[dev-dependencies]
frame-support = { workspace = true, features = ["experimental"] }
rand = { workspace = true, default-features = true }
rand_distr = { workspace = true }
sp-crypto-hashing = { workspace = true, default-features = true }
sp-tracing = { workspace = true, default-features = true }
[features]
default = ["std"]
# Build with the Rust standard library (native builds); disable for Wasm.
std = [
"codec/std",
"environmental/std",
"frame-benchmarking?/std",
"frame-support/std",
"frame-system/std",
"log/std",
"scale-info/std",
"sp-arithmetic/std",
"sp-core/std",
"sp-io/std",
"sp-runtime/std",
"sp-tracing/std",
"sp-weights/std",
]
# Enable benchmark dispatchables and helpers.
runtime-benchmarks = [
"frame-benchmarking/runtime-benchmarks",
"frame-support/runtime-benchmarks",
"frame-system/runtime-benchmarks",
"sp-io/runtime-benchmarks",
"sp-runtime/runtime-benchmarks",
]
# Enable `try-runtime` state checks.
try-runtime = [
"frame-support/try-runtime",
"frame-system/try-runtime",
"sp-runtime/try-runtime",
]
@@ -0,0 +1,298 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Benchmarking for the message queue pallet.
#![cfg(feature = "runtime-benchmarks")]
#![allow(unused_assignments)] // Needed for `ready_ring_knit`.
use super::{mock_helpers::*, Pallet as MessageQueue, *};
use frame_benchmarking::v2::*;
use frame_support::traits::Get;
use frame_system::RawOrigin;
use sp_io::hashing::blake2_256;
// Benchmarks require that message origins can be generated from integers and compared.
#[benchmarks(
    where
        <<T as Config>::MessageProcessor as ProcessMessage>::Origin: From<u32> + PartialEq,
        <T as Config>::Size: From<u32>,
        // NOTE: We need to generate multiple origins, therefore Origin is `From<u32>`. The
        // `PartialEq` is for asserting the outcome of the ring (un)knitting and *could* be
        // removed if really necessary.
)]
mod benchmarks {
    use super::*;
    // Worst case path of `ready_ring_knit`.
    #[benchmark]
    fn ready_ring_knit() {
        // Build a three-queue ready-ring and remove the middle queue, so that
        // re-knitting it has to splice between two existing neighbours.
        let mid: MessageOriginOf<T> = 1.into();
        build_ring::<T>(&[0.into(), mid.clone(), 2.into()]);
        unknit::<T>(&mid);
        assert_ring::<T>(&[0.into(), 2.into()]);
        let mut neighbours = None;
        #[block]
        {
            neighbours = MessageQueue::<T>::ready_ring_knit(&mid).ok();
        }
        // The neighbours needs to be modified manually.
        BookStateFor::<T>::mutate(&mid, |b| b.ready_neighbours = neighbours);
        // `mid` must be part of the ring again after knitting.
        assert_ring::<T>(&[0.into(), 2.into(), mid]);
    }
    // Worst case path of `ready_ring_unknit`.
    #[benchmark]
    fn ready_ring_unknit() {
        // Build a three-queue ring and remove the head, which re-links both neighbours.
        build_ring::<T>(&[0.into(), 1.into(), 2.into()]);
        assert_ring::<T>(&[0.into(), 1.into(), 2.into()]);
        let o: MessageOriginOf<T> = 0.into();
        let neighbours = BookStateFor::<T>::get(&o).ready_neighbours.unwrap();
        #[block]
        {
            MessageQueue::<T>::ready_ring_unknit(&o, neighbours);
        }
        // Queue 0 must be gone from the ring.
        assert_ring::<T>(&[1.into(), 2.into()]);
    }
    // `service_queues` without any queue processing.
    #[benchmark]
    fn service_queue_base() {
        #[block]
        {
            // Servicing a non-existent queue measures the fixed base cost only.
            MessageQueue::<T>::service_queue(0.into(), &mut WeightMeter::new(), Weight::MAX);
        }
    }
    // `service_page` without any message processing but with page completion.
    #[benchmark]
    fn service_page_base_completion() {
        // An empty default page is immediately considered complete when serviced.
        let origin: MessageOriginOf<T> = 0.into();
        let page = PageOf::<T>::default();
        Pages::<T>::insert(&origin, 0, &page);
        let mut book_state = single_page_book::<T>();
        let mut meter = WeightMeter::new();
        let limit = Weight::MAX;
        #[block]
        {
            MessageQueue::<T>::service_page(&origin, &mut book_state, &mut meter, limit);
        }
    }
    // `service_page` without any message processing and without page completion.
    #[benchmark]
    fn service_page_base_no_completion() {
        let origin: MessageOriginOf<T> = 0.into();
        let mut page = PageOf::<T>::default();
        // Mock the storage such that `is_complete` returns `false` but `peek_first` returns `None`.
        page.first = 1.into();
        page.remaining = 1.into();
        Pages::<T>::insert(&origin, 0, &page);
        let mut book_state = single_page_book::<T>();
        let mut meter = WeightMeter::new();
        let limit = Weight::MAX;
        #[block]
        {
            MessageQueue::<T>::service_page(&origin, &mut book_state, &mut meter, limit);
        }
    }
    // Processing a single message from a page.
    #[benchmark]
    fn service_page_item() {
        // Use a maximum-length message so decoding cost is worst-case.
        let msg = vec![1u8; MaxMessageLenOf::<T>::get() as usize];
        let mut page = page::<T>(&msg.clone());
        let mut book = book_for::<T>(&page);
        assert!(page.peek_first().is_some(), "There is one message");
        let mut weight = WeightMeter::new();
        #[block]
        {
            let status = MessageQueue::<T>::service_page_item(
                &0u32.into(),
                0,
                &mut book,
                &mut page,
                &mut weight,
                Weight::MAX,
            );
            assert_eq!(status, ItemExecutionStatus::Executed(true));
        }
        // Check that it was processed.
        assert_last_event::<T>(
            Event::Processed {
                id: blake2_256(&msg).into(),
                origin: 0.into(),
                weight_used: 1.into_weight(),
                success: true,
            }
            .into(),
        );
        // The message is marked processed in the page and removed from the book's count.
        let (_, processed, _) = page.peek_index(0).unwrap();
        assert!(processed);
        assert_eq!(book.message_count, 0);
    }
    // Worst case for calling `bump_service_head`.
    #[benchmark]
    fn bump_service_head() {
        // Prepare storage so that the service head will be bumped from 0 to 10.
        setup_bump_service_head::<T>(0.into(), 10.into());
        let mut weight = WeightMeter::new();
        #[block]
        {
            MessageQueue::<T>::bump_service_head(&mut weight);
        }
        assert_eq!(ServiceHead::<T>::get().unwrap(), 10u32.into());
        // The consumed weight must match the declared weight function.
        assert_eq!(weight.consumed(), T::WeightInfo::bump_service_head());
    }
    // Worst case for calling `set_service_head`.
    #[benchmark]
    fn set_service_head() {
        // Prepare storage with queues 0 and 1; the head starts at 0.
        setup_bump_service_head::<T>(0.into(), 1.into());
        let mut weight = WeightMeter::new();
        assert_eq!(ServiceHead::<T>::get().unwrap(), 0u32.into());
        #[block]
        {
            assert!(MessageQueue::<T>::set_service_head(&mut weight, &1u32.into()).unwrap());
        }
        assert_eq!(ServiceHead::<T>::get().unwrap(), 1u32.into());
        // The consumed weight must match the declared weight function.
        assert_eq!(weight.consumed(), T::WeightInfo::set_service_head());
    }
    #[benchmark]
    fn reap_page() {
        // Mock the storage to get a *cullable* but not *reapable* page.
        let origin: MessageOriginOf<T> = 0.into();
        let mut book = single_page_book::<T>();
        let (page, msgs) = full_page::<T>();
        // Pretend that `MaxStale^2` pages exist, but only materialize the first one.
        for p in 0..T::MaxStale::get() * T::MaxStale::get() {
            if p == 0 {
                Pages::<T>::insert(&origin, p, &page);
            }
            book.end += 1;
            book.count += 1;
            book.message_count += msgs as u64;
            book.size += page.remaining_size.into() as u64;
        }
        book.begin = book.end - T::MaxStale::get();
        BookStateFor::<T>::insert(&origin, &book);
        assert!(Pages::<T>::contains_key(&origin, 0));
        #[extrinsic_call]
        _(RawOrigin::Signed(whitelisted_caller()), 0u32.into(), 0);
        // Page 0 must have been reaped and the event emitted.
        assert_last_event::<T>(Event::PageReaped { origin: 0.into(), index: 0 }.into());
        assert!(!Pages::<T>::contains_key(&origin, 0));
    }
    // Worst case for `execute_overweight` where the page is removed as completed.
    //
    // The worst case occurs when executing the last message in a page of which all are skipped
    // since it is using `peek_index` which has linear complexities.
    #[benchmark]
    fn execute_overweight_page_removed() {
        let origin: MessageOriginOf<T> = 0.into();
        let (mut page, msgs) = full_page::<T>();
        // Skip all messages.
        for _ in 1..msgs {
            page.skip_first(true);
        }
        // Leave the last message unprocessed (overweight).
        page.skip_first(false);
        let book = book_for::<T>(&page);
        Pages::<T>::insert(&origin, 0, &page);
        BookStateFor::<T>::insert(&origin, &book);
        #[block]
        {
            MessageQueue::<T>::execute_overweight(
                RawOrigin::Signed(whitelisted_caller()).into(),
                0u32.into(),
                0u32,
                ((msgs - 1) as u32).into(),
                Weight::MAX,
            )
            .unwrap();
        }
        assert_last_event::<T>(
            Event::Processed {
                id: blake2_256(&((msgs - 1) as u32).encode()).into(),
                origin: 0.into(),
                weight_used: Weight::from_parts(1, 1),
                success: true,
            }
            .into(),
        );
        assert!(!Pages::<T>::contains_key(&origin, 0), "Page must be removed");
    }
    // Worst case for `execute_overweight` where the page is updated.
    #[benchmark]
    fn execute_overweight_page_updated() {
        let origin: MessageOriginOf<T> = 0.into();
        let (mut page, msgs) = full_page::<T>();
        // Skip all messages.
        for _ in 0..msgs {
            page.skip_first(false);
        }
        let book = book_for::<T>(&page);
        Pages::<T>::insert(&origin, 0, &page);
        BookStateFor::<T>::insert(&origin, &book);
        #[block]
        {
            MessageQueue::<T>::execute_overweight(
                RawOrigin::Signed(whitelisted_caller()).into(),
                0u32.into(),
                0u32,
                ((msgs - 1) as u32).into(),
                Weight::MAX,
            )
            .unwrap();
        }
        assert_last_event::<T>(
            Event::Processed {
                id: blake2_256(&((msgs - 1) as u32).encode()).into(),
                origin: 0.into(),
                weight_used: Weight::from_parts(1, 1),
                success: true,
            }
            .into(),
        );
        // Unlike the `page_removed` case, unprocessed messages remain so the page persists.
        assert!(Pages::<T>::contains_key(&origin, 0), "Page must be updated");
    }
    // Generate the standard benchmark test suite against the integration-test runtime.
    impl_benchmark_test_suite! {
        MessageQueue,
        crate::mock::new_test_ext::<crate::integration_test::Test>(),
        crate::integration_test::Test
    }
}
@@ -0,0 +1,590 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Stress tests pallet-message-queue. Defines its own runtime config to use larger constants for
//! `HeapSize` and `MaxStale`.
//!
//! The tests in this file are ignored by default, since they are quite slow. You can run them
//! manually like this:
//!
//! ```sh
//! RUST_LOG=info cargo test -p pallet-message-queue --profile testnet -- --ignored
//! ```
#![cfg(test)]
use crate::{
mock::{
build_and_execute, gen_seed, set_weight, Callback, CountingMessageProcessor, IntoWeight,
MessagesProcessed, MockedWeightInfo, NumMessagesProcessed, YieldingQueues,
},
mock_helpers::{MessageOrigin, MessageOrigin::Everywhere},
*,
};
use crate as pallet_message_queue;
use frame_support::{derive_impl, parameter_types};
use rand::{rngs::StdRng, Rng, SeedableRng};
use rand_distr::Pareto;
use std::collections::{BTreeMap, BTreeSet};
// Mocked runtime consisting only of `System` and the message-queue pallet.
type Block = frame_system::mocking::MockBlock<Test>;
frame_support::construct_runtime!(
    pub enum Test
    {
        System: frame_system,
        MessageQueue: pallet_message_queue,
    }
);
#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
impl frame_system::Config for Test {
    type Block = Block;
}
parameter_types! {
    // Larger constants than the unit-test mock, to make the stress tests realistic.
    pub const HeapSize: u32 = 32 * 1024;
    pub const MaxStale: u32 = 32;
    pub static ServiceWeight: Option<Weight> = Some(Weight::from_parts(100, 100));
}
/// Pallet config for the stress-test runtime; uses [`AhmPrioritizer`] as the
/// queue-change handler so prioritization is exercised alongside normal servicing.
impl Config for Test {
    type RuntimeEvent = RuntimeEvent;
    type WeightInfo = MockedWeightInfo;
    type MessageProcessor = CountingMessageProcessor;
    type Size = u32;
    type QueueChangeHandler = AhmPrioritizer;
    type QueuePausedQuery = ();
    type HeapSize = HeapSize;
    type MaxStale = MaxStale;
    type ServiceWeight = ServiceWeight;
    type IdleMaxServiceWeight = ();
}
/// The object that does the AHM message prioritization for us.
#[derive(Debug, Default, codec::Encode, codec::Decode)]
pub struct AhmPrioritizer {
    // Block number until which the prioritized queue is repeatedly forced to the head.
    streak_until: Option<u64>,
    // The queue that receives preferential treatment, if any.
    prioritized_queue: Option<MessageOriginOf<Test>>,
    // Last observed message count of the prioritized queue; used to tell dequeues
    // apart from enqueues in `on_queue_changed`.
    favorite_queue_num_messages: Option<u64>,
}
// The whole `AhmPrioritizer` could be part of the AHM controller pallet.
parameter_types! {
    pub storage AhmPrioritizerStorage: AhmPrioritizer = AhmPrioritizer::default();
}
/// Instead of giving our prioritized queue only one block, we give it a streak of blocks.
const STREAK_LEN: u64 = 3;
impl OnQueueChanged<MessageOrigin> for AhmPrioritizer {
    fn on_queue_changed(origin: MessageOrigin, f: QueueFootprint) {
        let mut this = AhmPrioritizerStorage::get();
        // Only react to changes of the prioritized queue.
        if this.prioritized_queue != Some(origin) {
            return;
        }
        // Return early if this was an enqueue instead of a dequeue.
        if this.favorite_queue_num_messages.map_or(false, |n| n <= f.storage.count) {
            return;
        }
        this.favorite_queue_num_messages = Some(f.storage.count);
        // only update when we are not already in a streak
        if this.streak_until.map_or(false, |s| s < System::block_number()) {
            this.streak_until = Some(System::block_number().saturating_add(STREAK_LEN));
        }
        // NOTE: `this` is written back to storage by the `Drop` impl.
    }
}
impl AhmPrioritizer {
    // This will need to be called by the migration controller.
    //
    // Returns the weight consumed by the (optional) `force_set_head` call.
    fn on_initialize(now: u64) -> Weight {
        let mut meter = WeightMeter::new();
        let mut this = AhmPrioritizerStorage::get();
        // Nothing to do when no queue is prioritized.
        let Some(q) = this.prioritized_queue else {
            return meter.consumed();
        };
        // init
        if this.streak_until.is_none() {
            this.streak_until = Some(0);
        }
        if this.favorite_queue_num_messages.is_none() {
            this.favorite_queue_num_messages = Some(0);
        }
        // Nothing to prioritize when our queue has no pages.
        if Pallet::<Test>::footprint(q).pages == 0 {
            return meter.consumed();
        }
        // Our queue did not get a streak since 10 blocks. It must either be empty or starved:
        if this.streak_until.map_or(false, |until| until < now.saturating_sub(10)) {
            log::warn!("Queue is being starved, scheduling streak of {} blocks", STREAK_LEN);
            this.streak_until = Some(now.saturating_add(STREAK_LEN));
        }
        // While in a streak, force our queue to the service head each block.
        if this.streak_until.map_or(false, |until| until > now) {
            let _ = Pallet::<Test>::force_set_head(&mut meter, &q).defensive();
        }
        // `this` is persisted back to storage by the `Drop` impl.
        meter.consumed()
    }
}
// Persist any in-memory mutation of the prioritizer back into its storage item.
impl Drop for AhmPrioritizer {
    fn drop(&mut self) {
        AhmPrioritizerStorage::set(self);
    }
}
/// Simulates heavy usage by enqueueing and processing large amounts of messages.
///
/// # Example output
///
/// ```pre
/// Enqueued 1189 messages across 176 queues. Payload 46.97 KiB
/// Processing 772 of 1189 messages
/// Enqueued 9270 messages across 1559 queues. Payload 131.85 KiB
/// Processing 6262 of 9687 messages
/// Enqueued 5025 messages across 1225 queues. Payload 100.23 KiB
/// Processing 1739 of 8450 messages
/// Enqueued 42061 messages across 6357 queues. Payload 536.29 KiB
/// Processing 11675 of 48772 messages
/// Enqueued 20253 messages across 2420 queues. Payload 288.34 KiB
/// Processing 28711 of 57350 messages
/// Processing all remaining 28639 messages
/// ```
#[test]
#[ignore] // Only run in the CI, otherwise it's too slow.
fn stress_test_enqueue_and_service() {
    let blocks = 20;
    let max_queues = 10_000;
    let max_messages_per_queue = 10_000;
    let max_msg_len = MaxMessageLenOf::<Test>::get();
    let mut rng = StdRng::seed_from_u64(gen_seed());
    build_and_execute::<Test>(|| {
        let mut msgs_remaining = 0;
        for _ in 0..blocks {
            // Start by enqueuing a large number of messages.
            let enqueued =
                enqueue_messages(max_queues, max_messages_per_queue, max_msg_len, &mut rng);
            msgs_remaining += enqueued;
            // Pick a fraction of all messages currently in queue and process them.
            let processed = rng.gen_range(1..=msgs_remaining);
            log::info!("Processing {} of all messages {}", processed, msgs_remaining);
            process_some_messages(processed); // This also advances the block.
            msgs_remaining -= processed;
        }
        // Drain everything and check the pallet invariants afterwards.
        log::info!("Processing all remaining {} messages", msgs_remaining);
        process_all_messages(msgs_remaining);
        post_conditions();
    });
}
/// Simulate heavy usage while calling `force_set_head` on random queues.
#[test]
#[ignore] // Only run in the CI, otherwise it's too slow.
fn stress_test_force_set_head() {
    let blocks = 20;
    let max_queues = 10_000;
    let max_messages_per_queue = 10_000;
    let max_msg_len = MaxMessageLenOf::<Test>::get();
    let mut rng = StdRng::seed_from_u64(gen_seed());
    build_and_execute::<Test>(|| {
        let mut msgs_remaining = 0;
        for _ in 0..blocks {
            // Start by enqueuing a large number of messages.
            let enqueued =
                enqueue_messages(max_queues, max_messages_per_queue, max_msg_len, &mut rng);
            msgs_remaining += enqueued;
            // Randomly jump the service head around; must never break liveness.
            for _ in 0..10 {
                let random_queue = rng.gen_range(0..=max_queues);
                MessageQueue::force_set_head(&mut WeightMeter::new(), &Everywhere(random_queue))
                    .unwrap();
            }
            // Pick a fraction of all messages currently in queue and process them.
            let processed = rng.gen_range(1..=msgs_remaining);
            log::info!("Processing {} of all messages {}", processed, msgs_remaining);
            process_some_messages(processed); // This also advances the block.
            msgs_remaining -= processed;
        }
        log::info!("Processing all remaining {} messages", msgs_remaining);
        process_all_messages(msgs_remaining);
        post_conditions();
    });
}
/// Check that our AHM prioritization does not affect liveness. This does not really check the AHM
/// prioritization works itself, but rather that it does not break things. The actual test is in
/// another test below.
#[test]
#[ignore] // Only run in the CI, otherwise it's too slow.
fn stress_test_prioritize_queue() {
    let blocks = 20;
    let max_queues = 10_000;
    let favorite_queue = Everywhere(9000);
    let max_messages_per_queue = 1_000;
    let max_msg_len = MaxMessageLenOf::<Test>::get();
    let mut rng = StdRng::seed_from_u64(gen_seed());
    build_and_execute::<Test>(|| {
        // Mark our favorite queue; `drop` persists it via the `Drop` impl.
        let mut prio = AhmPrioritizerStorage::get();
        prio.prioritized_queue = Some(favorite_queue);
        drop(prio);
        let mut msgs_remaining = 0;
        for _ in 0..blocks {
            // Start by enqueuing a large number of messages.
            let enqueued =
                enqueue_messages(max_queues, max_messages_per_queue, max_msg_len, &mut rng);
            msgs_remaining += enqueued;
            // ensure that our favorite queue always has some more messages
            for _ in 0..200 {
                MessageQueue::enqueue_message(
                    BoundedSlice::defensive_truncate_from("favorite".as_bytes()),
                    favorite_queue,
                );
                msgs_remaining += 1;
            }
            // Pick a fraction of all messages currently in queue and process them.
            let processed = rng.gen_range(1..=100);
            log::info!("Processing {} of all messages {}", processed, msgs_remaining);
            process_some_messages(processed); // This also advances the block.
            msgs_remaining -= processed;
        }
        log::info!("Processing all remaining {} messages", msgs_remaining);
        process_all_messages(msgs_remaining);
        post_conditions();
    });
}
/// Very similar to `stress_test_enqueue_and_service`, but enqueues messages while processing them.
#[test]
#[ignore] // Only run in the CI, otherwise it's too slow.
fn stress_test_recursive() {
    let blocks = 20;
    let mut rng = StdRng::seed_from_u64(gen_seed());
    // We need to use thread-locals since the callback cannot capture anything.
    parameter_types! {
        pub static TotalEnqueued: u32 = 0;
        pub static Enqueued: u32 = 0;
        pub static Called: u32 = 0;
    }
    // Reset the thread-locals in case a previous test in this process set them.
    Called::take();
    Enqueued::take();
    TotalEnqueued::take();
    Callback::set(Box::new(|_, _| {
        let mut rng = StdRng::seed_from_u64(Enqueued::get() as u64);
        let max_queues = 1_000;
        let max_messages_per_queue = 1_000;
        let max_msg_len = MaxMessageLenOf::<Test>::get();
        // Instead of directly enqueueing, we enqueue inside a `service` call.
        let enqueued = enqueue_messages(max_queues, max_messages_per_queue, max_msg_len, &mut rng);
        TotalEnqueued::set(TotalEnqueued::get() + enqueued);
        Enqueued::set(Enqueued::get() + enqueued);
        Called::set(Called::get() + 1);
        Ok(())
    }));
    build_and_execute::<Test>(|| {
        let mut msgs_remaining = 0;
        for b in 0..blocks {
            log::info!("Block #{}", b);
            MessageQueue::enqueue_message(
                BoundedSlice::defensive_truncate_from(format!("callback={b}").as_bytes()),
                b.into(),
            );
            // The `+ 1` accounts for the trigger message enqueued just above.
            msgs_remaining += Enqueued::take() + 1;
            // Pick a fraction of all messages currently in queue and process them.
            let processed = rng.gen_range(1..=msgs_remaining);
            log::info!("Processing {} of all messages {}", processed, msgs_remaining);
            process_some_messages(processed); // This also advances the block.
            msgs_remaining -= processed;
            TotalEnqueued::set(TotalEnqueued::get() - processed + 1);
            MessageQueue::do_try_state().unwrap();
        }
        // Keep draining until every recursive callback has fired.
        while Called::get() < blocks {
            msgs_remaining += Enqueued::take();
            // Pick a fraction of all messages currently in queue and process them.
            let processed = rng.gen_range(1..=msgs_remaining);
            log::info!("Processing {} of all messages {}", processed, msgs_remaining);
            process_some_messages(processed); // This also advances the block.
            msgs_remaining -= processed;
            TotalEnqueued::set(TotalEnqueued::get() - processed);
            MessageQueue::do_try_state().unwrap();
        }
        let msgs_remaining = TotalEnqueued::take();
        log::info!("Processing all remaining {} messages", msgs_remaining);
        process_all_messages(msgs_remaining);
        assert_eq!(Called::get(), blocks);
        post_conditions();
    });
}
/// Simulates heavy usage of the suspension logic via `Yield`.
///
/// # Example output
///
/// ```pre
/// Enqueued 11776 messages across 2526 queues. Payload 173.94 KiB
/// Suspended 63 and resumed 7 queues of 2526 in total
/// Processing 593 messages. Resumed msgs: 11599, All msgs: 11776
/// Enqueued 30104 messages across 5533 queues. Payload 416.62 KiB
/// Suspended 24 and resumed 15 queues of 5533 in total
/// Processing 12841 messages. Resumed msgs: 40857, All msgs: 41287
/// Processing all 28016 remaining resumed messages
/// Resumed all 64 suspended queues
/// Processing all remaining 430 messages
/// ```
#[test]
#[ignore] // Only run in the CI, otherwise it's too slow.
fn stress_test_queue_suspension() {
    let blocks = 20;
    let max_queues = 10_000;
    let max_messages_per_queue = 10_000;
    let (max_suspend_per_block, max_resume_per_block) = (100, 50);
    let max_msg_len = MaxMessageLenOf::<Test>::get();
    let mut rng = StdRng::seed_from_u64(gen_seed());
    build_and_execute::<Test>(|| {
        let mut suspended = BTreeSet::<u32>::new();
        let mut msgs_remaining = 0;
        for _ in 0..blocks {
            // Start by enqueuing a large number of messages.
            let enqueued =
                enqueue_messages(max_queues, max_messages_per_queue, max_msg_len, &mut rng);
            msgs_remaining += enqueued;
            let per_queue = msgs_per_queue();
            // Suspend a random subset of queues.
            let to_suspend = rng.gen_range(0..max_suspend_per_block).min(per_queue.len());
            for _ in 0..to_suspend {
                let q = rng.gen_range(0..per_queue.len());
                suspended.insert(*per_queue.iter().nth(q).map(|(q, _)| q).unwrap());
            }
            // Resume a random subset of suspended queues.
            let to_resume = rng.gen_range(0..max_resume_per_block).min(suspended.len());
            for _ in 0..to_resume {
                let q = rng.gen_range(0..suspended.len());
                suspended.remove(&suspended.iter().nth(q).unwrap().clone());
            }
            log::info!(
                "Suspended {} and resumed {} queues of {} in total",
                to_suspend,
                to_resume,
                per_queue.len()
            );
            // Make the mocked processor `Yield` for all currently suspended queues.
            YieldingQueues::set(suspended.iter().map(|q| MessageOrigin::Everywhere(*q)).collect());
            // Pick a fraction of all messages currently in queue and process them.
            let resumed_messages =
                per_queue.iter().filter(|(q, _)| !suspended.contains(q)).map(|(_, n)| n).sum();
            let processed = rng.gen_range(1..=resumed_messages);
            log::info!(
                "Processing {} messages. Resumed msgs: {}, All msgs: {}",
                processed,
                resumed_messages,
                msgs_remaining
            );
            process_some_messages(processed); // This also advances the block.
            msgs_remaining -= processed;
        }
        // First drain all queues that are not suspended.
        let per_queue = msgs_per_queue();
        let resumed_messages =
            per_queue.iter().filter(|(q, _)| !suspended.contains(q)).map(|(_, n)| n).sum();
        log::info!("Processing all {} remaining resumed messages", resumed_messages);
        process_all_messages(resumed_messages);
        msgs_remaining -= resumed_messages;
        // Then resume everything and drain the rest.
        let resumed = YieldingQueues::take();
        log::info!("Resumed all {} suspended queues", resumed.len());
        log::info!("Processing all remaining {} messages", msgs_remaining);
        process_all_messages(msgs_remaining);
        post_conditions();
    });
}
/// Test that our AHM prioritizer will ensure that our favorite queue always gets some dedicated
/// weight.
#[test]
#[ignore]
fn stress_test_ahm_despair_mode_works() {
    build_and_execute::<Test>(|| {
        let blocks = 200;
        let queues = 200;
        // 100 messages in each of the 200 queues.
        for o in 0..queues {
            for i in 0..100 {
                MessageQueue::enqueue_message(
                    BoundedSlice::defensive_truncate_from(format!("{}:{}", o, i).as_bytes()),
                    Everywhere(o),
                );
            }
        }
        set_weight("bump_head", Weight::from_parts(1, 1));
        // Prioritize the last queue.
        let mut prio = AhmPrioritizerStorage::get();
        prio.prioritized_queue = Some(Everywhere(199));
        drop(prio); // Persisted via the `Drop` impl.
        // Deliberately starve the queues with a tiny service weight.
        ServiceWeight::set(Some(Weight::from_parts(10, 10)));
        for _ in 0..blocks {
            next_block();
        }
        // Check that our favorite queue has processed the most messages.
        let mut min = u64::MAX;
        let mut min_origin = 0;
        for o in 0..queues {
            let fp = MessageQueue::footprint(Everywhere(o));
            if fp.storage.count < min {
                min = fp.storage.count;
                min_origin = o;
            }
        }
        assert_eq!(min_origin, 199);
        // Process all remaining messages.
        ServiceWeight::set(Some(Weight::MAX));
        next_block();
        post_conditions();
    });
}
/// Current number of messages per queue, keyed by the queue's `Everywhere` index.
fn msgs_per_queue() -> BTreeMap<u32, u32> {
    BookStateFor::<Test>::iter()
        .map(|(origin, book)| {
            // All queues in these tests use the `Everywhere` origin variant.
            let MessageOrigin::Everywhere(index) = origin else {
                unreachable!();
            };
            (index, book.message_count as u32)
        })
        .collect()
}
/// Enqueue a random number of random messages into a random number of queues.
///
/// Returns the total number of enqueued messages.
fn enqueue_messages(
    max_queues: u32,
    max_per_queue: u32,
    max_msg_len: u32,
    rng: &mut StdRng,
) -> u32 {
    let num_queues = rng.gen_range(1..max_queues);
    let mut num_messages = 0;
    let mut total_msg_len = 0;
    for origin in 0..num_queues {
        // Pareto-distributed message count: most queues get few messages, some get many.
        let num_messages_per_queue =
            (rng.sample(Pareto::new(1.0, 1.1).unwrap()) as u32).min(max_per_queue);
        for m in 0..num_messages_per_queue {
            let mut message = format!("{}:{}", &origin, &m).into_bytes();
            // Pareto-distributed length, at least as long as the `origin:index` prefix.
            let msg_len = (rng.sample(Pareto::new(1.0, 1.0).unwrap()) as u32)
                .clamp(message.len() as u32, max_msg_len);
            message.resize(msg_len as usize, 0);
            MessageQueue::enqueue_message(
                BoundedSlice::defensive_truncate_from(&message),
                origin.into(),
            );
            total_msg_len += msg_len;
        }
        num_messages += num_messages_per_queue;
    }
    log::info!(
        "Enqueued {} messages across {} queues. Payload {:.2} KiB",
        num_messages,
        num_queues,
        total_msg_len as f64 / 1024.0
    );
    num_messages
}
/// Process the number of messages.
///
/// Grants exactly one weight unit per message and asserts that everything was consumed.
fn process_some_messages(num_msgs: u32) {
    let weight = (num_msgs as u64).into_weight();
    ServiceWeight::set(Some(weight));
    let consumed = next_block();
    // All pages of every book must still be ready (nothing may become unreachable).
    for origin in BookStateFor::<Test>::iter_keys() {
        let fp = MessageQueue::footprint(origin);
        assert_eq!(fp.pages, fp.ready_pages);
    }
    assert_eq!(consumed, weight, "\n{}", MessageQueue::debug_info());
    assert_eq!(NumMessagesProcessed::take(), num_msgs as usize);
}
/// Process all remaining messages and assert their number.
fn process_all_messages(expected: u32) {
    // Unlimited weight: one block must drain everything.
    ServiceWeight::set(Some(Weight::MAX));
    let consumed = next_block();
    assert_eq!(consumed, Weight::from_all(expected as u64));
    assert_eq!(NumMessagesProcessed::take(), expected as usize);
    // Clear the recorded messages so later assertions start fresh.
    MessagesProcessed::take();
}
/// Advance the chain by one block.
///
/// Returns the weight consumed by `MessageQueue::on_initialize()`.
fn next_block() -> Weight {
    log::info!("Next block: {}", System::block_number() + 1);
    // Finalize the current block, bump the number, then initialize the next one.
    MessageQueue::on_finalize(System::block_number());
    System::on_finalize(System::block_number());
    System::set_block_number(System::block_number() + 1);
    System::on_initialize(System::block_number());
    // The prioritizer runs before the queue pallet so `force_set_head` takes effect.
    AhmPrioritizer::on_initialize(System::block_number());
    MessageQueue::on_initialize(System::block_number())
}
/// Assert that the pallet is in the expected post state.
fn post_conditions() {
    // All queues are empty.
    for (_, book) in BookStateFor::<Test>::iter() {
        assert!(book.end >= book.begin);
        assert_eq!(book.count, 0);
        assert_eq!(book.size, 0);
        assert_eq!(book.message_count, 0);
        assert!(book.ready_neighbours.is_none());
    }
    // No pages remain.
    assert_eq!(Pages::<Test>::iter().count(), 0);
    // Service head is gone.
    assert!(ServiceHead::<Test>::get().is_none());
    // This still works fine.
    assert_eq!(MessageQueue::service_queues(Weight::MAX), Weight::zero(), "Nothing left");
    // The pallet's own invariants must hold, and an extra empty block must be harmless.
    MessageQueue::do_try_state().unwrap();
    next_block();
}
File diff suppressed because it is too large Load Diff
+395
View File
@@ -0,0 +1,395 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Test helpers and runtime setup for the message queue pallet.
#![cfg(test)]
pub use super::mock_helpers::*;
use super::*;
use crate as pallet_message_queue;
use alloc::collections::btree_map::BTreeMap;
use frame_support::{derive_impl, parameter_types};
use sp_runtime::BuildStorage;
// Mocked runtime consisting only of `System` and the pallet under test.
type Block = frame_system::mocking::MockBlock<Test>;
frame_support::construct_runtime!(
    pub enum Test
    {
        System: frame_system,
        MessageQueue: pallet_message_queue,
    }
);
#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
impl frame_system::Config for Test {
    type Block = Block;
}
parameter_types! {
    // Small heap/staleness limits so unit tests hit page boundaries quickly.
    pub const HeapSize: u32 = 40;
    pub const MaxStale: u32 = 2;
    pub const ServiceWeight: Option<Weight> = Some(Weight::from_parts(100, 100));
}
/// Pallet config for unit tests: records processed messages and queue changes.
impl Config for Test {
    type RuntimeEvent = RuntimeEvent;
    type WeightInfo = MockedWeightInfo;
    type MessageProcessor = RecordingMessageProcessor;
    type Size = u32;
    type QueueChangeHandler = RecordingQueueChangeHandler;
    type QueuePausedQuery = MockedQueuePauser;
    type HeapSize = HeapSize;
    type MaxStale = MaxStale;
    type ServiceWeight = ServiceWeight;
    type IdleMaxServiceWeight = ServiceWeight;
}
/// Mocked `WeightInfo` impl which allows to set the weight per call.
pub struct MockedWeightInfo;
parameter_types! {
    /// Storage for `MockedWeightInfo`, do not use directly.
    pub static WeightForCall: BTreeMap<String, Weight> = Default::default();
    /// Fallback weight for calls without an explicit entry in `WeightForCall`.
    pub static DefaultWeightForCall: Weight = Weight::zero();
}
/// Set the return value for a function from the `WeightInfo` trait.
impl MockedWeightInfo {
    /// Set the weight of a specific weight function.
    pub fn set_weight<T: Config>(call_name: &str, weight: Weight) {
        // Read-modify-write, since the `parameter_types!` storage has no in-place mutate.
        let mut calls = WeightForCall::get();
        calls.insert(call_name.into(), weight);
        WeightForCall::set(calls);
    }
}
impl MockedWeightInfo {
    /// Look up the mocked weight for `call_name`, falling back to
    /// `DefaultWeightForCall` when no explicit weight was configured.
    ///
    /// Deduplicates the identical lookup previously repeated in every trait method.
    fn weight_of(call_name: &str) -> Weight {
        WeightForCall::get()
            .get(call_name)
            .copied()
            .unwrap_or(DefaultWeightForCall::get())
    }
}
/// `WeightInfo` implementation that returns the configured mock weight per call name.
impl crate::weights::WeightInfo for MockedWeightInfo {
    fn reap_page() -> Weight {
        Self::weight_of("reap_page")
    }
    fn execute_overweight_page_updated() -> Weight {
        Self::weight_of("execute_overweight_page_updated")
    }
    fn execute_overweight_page_removed() -> Weight {
        Self::weight_of("execute_overweight_page_removed")
    }
    fn service_page_base_completion() -> Weight {
        Self::weight_of("service_page_base_completion")
    }
    fn service_page_base_no_completion() -> Weight {
        Self::weight_of("service_page_base_no_completion")
    }
    fn service_queue_base() -> Weight {
        Self::weight_of("service_queue_base")
    }
    fn bump_service_head() -> Weight {
        Self::weight_of("bump_service_head")
    }
    fn set_service_head() -> Weight {
        Self::weight_of("set_service_head")
    }
    fn service_page_item() -> Weight {
        Self::weight_of("service_page_item")
    }
    fn ready_ring_knit() -> Weight {
        Self::weight_of("ready_ring_knit")
    }
    fn ready_ring_unknit() -> Weight {
        Self::weight_of("ready_ring_unknit")
    }
}
parameter_types! {
	/// All messages processed so far by [`RecordingMessageProcessor`], with their origin.
	pub static MessagesProcessed: Vec<(Vec<u8>, MessageOrigin)> = vec![];
	/// Queues that should return `Yield` upon being processed.
	pub static YieldingQueues: Vec<MessageOrigin> = vec![];
}
/// A message processor which records all processed messages into [`MessagesProcessed`].
pub struct RecordingMessageProcessor;
impl ProcessMessage for RecordingMessageProcessor {
	/// The transport from where a message originates.
	type Origin = MessageOrigin;

	/// Process the given message, using no more than `weight_limit` in weight to do so.
	///
	/// Consumes exactly `n` weight of all components if it starts `weight=n` and `1` otherwise.
	/// Errors if the given `weight_limit` is insufficient to process the message or if the message
	/// is `badformat`, `corrupt` or `unsupported` with the respective error.
	fn process_message(
		message: &[u8],
		origin: Self::Origin,
		meter: &mut WeightMeter,
		_id: &mut [u8; 32],
	) -> Result<bool, ProcessMessageError> {
		processing_message(message, &origin)?;
		// Messages of the form `weight=n` consume exactly `n` weight; all others consume `1`.
		let weight = if let Some(digits) = message.strip_prefix(&b"weight="[..]) {
			digits
				.iter()
				.take_while(|c| c.is_ascii_digit())
				.fold(0u64, |acc, &c| acc * 10 + u64::from(c - b'0'))
		} else {
			1
		};
		let required = Weight::from_parts(weight, weight);
		if meter.try_consume(required).is_ok() {
			if let Some(p) = message.strip_prefix(&b"callback="[..]) {
				let s = String::from_utf8(p.to_vec()).expect("Need valid UTF8");
				if let Err(()) = Callback::get()(&origin, s.parse().expect("Expected an u32")) {
					return Err(ProcessMessageError::Corrupt);
				}
				// A callback argument containing `000` makes the message report `Ok(false)`
				// without being recorded.
				if s.contains("000") {
					return Ok(false);
				}
			}
			MessagesProcessed::mutate(|m| m.push((message.to_vec(), origin)));
			Ok(true)
		} else {
			Err(ProcessMessageError::Overweight(required))
		}
	}
}
parameter_types! {
	/// Callback invoked for `callback=n` messages; the `u32` is the parsed `n`.
	/// Returning `Err(())` makes the message fail as `Corrupt`.
	pub static Callback: Box<fn (&MessageOrigin, u32) -> Result<(), ()>> = Box::new(|_, _| { Ok(()) });
	/// When `true`, `stacklimitreached` messages are processed normally instead of erroring.
	pub static IgnoreStackOvError: bool = false;
}
/// Process a mocked message. Messages ending in `badformat`, `corrupt`, `unsupported`,
/// `yield` or `stacklimitreached` fail with the respective error; everything else succeeds.
fn processing_message(msg: &[u8], origin: &MessageOrigin) -> Result<(), ProcessMessageError> {
	// A yielding queue refuses all of its messages.
	if YieldingQueues::get().contains(origin) {
		return Err(ProcessMessageError::Yield);
	}
	let text = String::from_utf8_lossy(msg);
	let suffix_errors = [
		("badformat", ProcessMessageError::BadFormat),
		("corrupt", ProcessMessageError::Corrupt),
		("unsupported", ProcessMessageError::Unsupported),
		("yield", ProcessMessageError::Yield),
	];
	if let Some((_, error)) = suffix_errors.into_iter().find(|(s, _)| text.ends_with(s)) {
		Err(error)
	} else if text.ends_with("stacklimitreached") && !IgnoreStackOvError::get() {
		Err(ProcessMessageError::StackLimitReached)
	} else {
		Ok(())
	}
}
parameter_types! {
	/// Number of messages successfully processed by [`CountingMessageProcessor`].
	pub static NumMessagesProcessed: usize = 0;
	/// Number of messages for which [`CountingMessageProcessor`] returned an error.
	pub static NumMessagesErrored: usize = 0;
}
/// Similar to [`RecordingMessageProcessor`] but only counts the number of messages processed and
/// does always consume one weight per message.
///
/// The [`RecordingMessageProcessor`] is a bit too slow for the integration tests.
pub struct CountingMessageProcessor;
impl ProcessMessage for CountingMessageProcessor {
	type Origin = MessageOrigin;

	/// Count the message as processed (or errored) and consume exactly one weight.
	fn process_message(
		message: &[u8],
		origin: Self::Origin,
		meter: &mut WeightMeter,
		_id: &mut [u8; 32],
	) -> Result<bool, ProcessMessageError> {
		// Any processing error bumps the error counter before being propagated.
		processing_message(message, &origin).map_err(|e| {
			NumMessagesErrored::set(NumMessagesErrored::get() + 1);
			e
		})?;
		let required = Weight::from_parts(1, 1);
		meter
			.try_consume(required)
			.map_err(|_| ProcessMessageError::Overweight(required))?;
		if let Some(raw) = message.strip_prefix(&b"callback="[..]) {
			let s = String::from_utf8(raw.to_vec()).expect("Need valid UTF8");
			let arg: u32 = s.parse().expect("Expected an u32");
			Callback::get()(&origin, arg).map_err(|()| ProcessMessageError::Corrupt)?;
		}
		NumMessagesProcessed::set(NumMessagesProcessed::get() + 1);
		Ok(true)
	}
}
parameter_types! {
	/// Storage for `RecordingQueueChangeHandler`, do not use directly.
	/// Each entry is `(origin, message_count, size_in_bytes)`.
	pub static QueueChanges: Vec<(MessageOrigin, u64, u64)> = vec![];
}
/// Records all queue changes into [`QueueChanges`].
pub struct RecordingQueueChangeHandler;
impl OnQueueChanged<MessageOrigin> for RecordingQueueChangeHandler {
	/// Append `(origin, message_count, size_in_bytes)` to [`QueueChanges`].
	fn on_queue_changed(id: MessageOrigin, fp: QueueFootprint) {
		let mut changes = QueueChanges::get();
		changes.push((id, fp.storage.count, fp.storage.size));
		QueueChanges::set(changes);
	}
}
parameter_types! {
	/// Queues that [`MockedQueuePauser`] reports as paused.
	pub static PausedQueues: Vec<MessageOrigin> = vec![];
}
/// A [`QueuePausedQuery`] mock that is backed by [`PausedQueues`].
pub struct MockedQueuePauser;
impl QueuePausedQuery<MessageOrigin> for MockedQueuePauser {
	/// A queue is paused iff it is listed in [`PausedQueues`].
	fn is_paused(id: &MessageOrigin) -> bool {
		PausedQueues::get().iter().any(|q| q == id)
	}
}
/// Create new test externalities.
///
/// Is generic since it is used by the unit test, integration tests and benchmarks.
pub fn new_test_ext<T: Config>() -> sp_io::TestExternalities
where
	frame_system::pallet_prelude::BlockNumberFor<T>: From<u32>,
{
	sp_tracing::try_init_simple();
	// Reset all mock state left over from a possible previous test run.
	WeightForCall::take();
	QueueChanges::take();
	NumMessagesErrored::take();
	let storage = frame_system::GenesisConfig::<T>::default().build_storage().unwrap();
	let mut externalities = sp_io::TestExternalities::new(storage);
	// Start at block 1 so that events get deposited.
	externalities.execute_with(|| frame_system::Pallet::<T>::set_block_number(1.into()));
	externalities
}
/// Run the function pointer inside externalities and asserts the try_state hook at the end.
pub fn build_and_execute<T: Config>(test: impl FnOnce() -> ())
where
	BlockNumberFor<T>: From<u32>,
{
	let mut ext = new_test_ext::<T>();
	ext.execute_with(|| {
		test();
		// The pallet invariants must hold after every test.
		pallet_message_queue::Pallet::<T>::do_try_state()
			.expect("All invariants must hold after a test");
	});
}
/// Set the weight of a specific weight function.
///
/// Shorthand for [`MockedWeightInfo::set_weight`] on the `Test` runtime.
pub fn set_weight(name: &str, w: Weight) {
	MockedWeightInfo::set_weight::<Test>(name, w);
}
/// Assert that exactly these pages are present. Assumes `Here` origin.
pub fn assert_pages(indices: &[u32]) {
	let present = Pages::<Test>::iter_keys().count();
	assert_eq!(present, indices.len(), "Wrong number of pages in the queue");
	for i in indices {
		assert!(Pages::<Test>::contains_key(MessageOrigin::Here, i));
	}
}
/// Build a ring with three queues: `Here`, `There` and `Everywhere(0)`.
pub fn build_triple_ring() {
	use MessageOrigin::*;
	build_ring::<Test>(&[Here, There, Everywhere(0)])
}
/// Shim to get rid of the annoying `::<Test>` everywhere.
pub fn assert_ring(queues: &[MessageOrigin]) {
	super::mock_helpers::assert_ring::<Test>(queues);
}
/// Shim for [`mock_helpers::knit`] on the `Test` runtime.
pub fn knit(queue: &MessageOrigin) {
	super::mock_helpers::knit::<Test>(queue);
}
/// Shim for [`mock_helpers::unknit`] on the `Test` runtime.
pub fn unknit(queue: &MessageOrigin) {
	super::mock_helpers::unknit::<Test>(queue);
}
/// Number of `OverweightEnqueued` events that were emitted so far.
pub fn num_overweight_enqueued_events() -> u32 {
	frame_system::Pallet::<Test>::events()
		.iter()
		.filter(|record| {
			matches!(record.event, RuntimeEvent::MessageQueue(crate::Event::OverweightEnqueued { .. }))
		})
		.count() as u32
}
/// Construct a [`QueueFootprint`] from its parts.
pub fn fp(pages: u32, ready_pages: u32, count: u64, size: u64) -> QueueFootprint {
	let storage = Footprint { count, size };
	QueueFootprint { storage, pages, ready_pages }
}
/// A random seed that can be overwritten with `MQ_SEED`.
pub fn gen_seed() -> u64 {
	use rand::Rng;
	// Prefer an explicit `MQ_SEED` env var for reproducible runs, otherwise pick a random one.
	let seed = std::env::var("MQ_SEED")
		.ok()
		.map(|raw| raw.parse().expect("Need valid u64 as MQ_SEED env variable"))
		.unwrap_or_else(|| rand::thread_rng().gen::<u64>());
	println!("Using seed: {}", seed);
	seed
}
@@ -0,0 +1,204 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(missing_docs)]
//! Std setup helpers for testing and benchmarking.
//!
//! Cannot be put into mock.rs since benchmarks require no-std and mock.rs is std.
use crate::*;
use alloc::vec::Vec;
use frame_support::traits::Defensive;
/// Converts `Self` into a `Weight` by using `Self` for all components.
pub trait IntoWeight {
	fn into_weight(self) -> Weight;
}
impl IntoWeight for u64 {
	/// Uses the `u64` for both the ref-time and the proof-size component.
	fn into_weight(self) -> Weight {
		Weight::from_parts(self, self)
	}
}
/// Mocked message origin for testing.
#[derive(
	Copy,
	Clone,
	Eq,
	PartialEq,
	Encode,
	Decode,
	DecodeWithMemTracking,
	MaxEncodedLen,
	TypeInfo,
	Debug,
)]
pub enum MessageOrigin {
	Here,
	There,
	/// A parameterized origin; arbitrary many distinct queues for tests.
	Everywhere(u32),
}
impl From<u32> for MessageOrigin {
	/// Maps any `u32` to [`MessageOrigin::Everywhere`].
	fn from(i: u32) -> Self {
		Self::Everywhere(i)
	}
}
/// Processes any message and consumes `(REQUIRED_WEIGHT, REQUIRED_WEIGHT)` weight.
///
/// Returns [ProcessMessageError::Overweight] error if the weight limit is not sufficient.
pub struct NoopMessageProcessor<Origin, const REQUIRED_WEIGHT: u64 = 1>(PhantomData<Origin>);
impl<Origin, const REQUIRED_WEIGHT: u64> ProcessMessage
	for NoopMessageProcessor<Origin, REQUIRED_WEIGHT>
where
	Origin: codec::FullCodec + MaxEncodedLen + Clone + Eq + PartialEq + TypeInfo + Debug,
{
	type Origin = Origin;

	/// Consume the constant required weight and report the message as processed.
	fn process_message(
		_message: &[u8],
		_origin: Self::Origin,
		meter: &mut WeightMeter,
		_id: &mut [u8; 32],
	) -> Result<bool, ProcessMessageError> {
		let required = Weight::from_parts(REQUIRED_WEIGHT, REQUIRED_WEIGHT);
		meter
			.try_consume(required)
			.map(|()| true)
			.map_err(|_| ProcessMessageError::Overweight(required))
	}
}
/// Create a bounded message from the given data, truncating defensively if too long.
pub fn msg<N: Get<u32>>(x: &str) -> BoundedSlice<'_, u8, N> {
	BoundedSlice::defensive_truncate_from(x.as_bytes())
}
/// Create an unbounded message from the given data.
pub fn vmsg(x: &str) -> Vec<u8> {
	x.bytes().collect()
}
/// Create a page from a single message.
pub fn page<T: Config>(msg: &[u8]) -> PageOf<T> {
	PageOf::<T>::from_message::<T>(msg.try_into().unwrap())
}
/// Create a book with a single message of one byte.
pub fn single_page_book<T: Config>() -> BookStateOf<T> {
	BookState { begin: 0, end: 1, count: 1, message_count: 1, size: 1, ..Default::default() }
}
/// Create an empty book.
///
/// The book spans one page (`count == 1`) but contains no messages.
pub fn empty_book<T: Config>() -> BookStateOf<T> {
	BookState { begin: 0, end: 1, count: 1, ..Default::default() }
}
/// Returns a full page of messages with their index as payload and the number of messages.
pub fn full_page<T: Config>() -> (PageOf<T>, usize) {
	let mut page = PageOf::<T>::default();
	let mut appended = 0;
	// Append the running index as SCALE-encoded payload until the page refuses more.
	for index in 0..u32::MAX {
		let fits = index
			.using_encoded(|data| page.try_append_message::<T>(data.try_into().unwrap()))
			.is_ok();
		if !fits {
			break;
		}
		appended += 1;
	}
	assert!(appended > 0, "page must hold at least one message");
	(page, appended)
}
/// Returns a book state describing the single given `page`.
///
/// `message_count` and `size` are taken from the page's remaining messages and bytes.
pub fn book_for<T: Config>(page: &PageOf<T>) -> BookStateOf<T> {
	BookState {
		count: 1,
		begin: 0,
		end: 1,
		message_count: page.remaining.into() as u64,
		size: page.remaining_size.into() as u64,
		..Default::default()
	}
}
/// Assert the last event that was emitted.
#[cfg(any(feature = "std", feature = "runtime-benchmarks", test))]
pub fn assert_last_event<T: Config>(generic_event: <T as Config>::RuntimeEvent) {
	// Events are not deposited at genesis, so asserting any event there is a test bug.
	assert!(
		!frame_system::Pallet::<T>::block_number().is_zero(),
		"The genesis block has no events"
	);
	frame_system::Pallet::<T>::assert_last_event(generic_event.into());
}
/// Provide a setup for `bump_service_head`.
///
/// Enqueues one message into the `current` and the `next` queue each, so both are ready.
pub fn setup_bump_service_head<T: Config>(
	current: <<T as Config>::MessageProcessor as ProcessMessage>::Origin,
	next: <<T as Config>::MessageProcessor as ProcessMessage>::Origin,
) {
	crate::Pallet::<T>::enqueue_message(msg("1"), current);
	crate::Pallet::<T>::enqueue_message(msg("1"), next);
}
/// Knit a queue into the ready-ring and write it back to storage.
pub fn knit<T: Config>(o: &<<T as Config>::MessageProcessor as ProcessMessage>::Origin) {
	let mut b = BookStateFor::<T>::get(o);
	// NOTE: `ready_ring_knit` may itself touch storage, so the book is read first and
	// written back afterwards with the new neighbours.
	b.ready_neighbours = crate::Pallet::<T>::ready_ring_knit(o).ok().defensive();
	BookStateFor::<T>::insert(o, b);
}
/// Unknit a queue from the ready-ring and write it back to storage.
pub fn unknit<T: Config>(o: &<<T as Config>::MessageProcessor as ProcessMessage>::Origin) {
	let mut b = BookStateFor::<T>::get(o);
	crate::Pallet::<T>::ready_ring_unknit(o, b.ready_neighbours.unwrap());
	b.ready_neighbours = None;
	BookStateFor::<T>::insert(o, b);
}
/// Build a ready-ring from the given `queues` by enqueueing one message into each.
///
/// Asserts afterwards that the ring consists of exactly these queues in order.
pub fn build_ring<T: Config>(
	queues: &[<<T as Config>::MessageProcessor as ProcessMessage>::Origin],
) {
	for queue in queues.iter() {
		crate::Pallet::<T>::enqueue_message(msg("1"), queue.clone());
	}
	assert_ring::<T>(queues);
}
/// Check that the Ready Ring consists of `queues` in that exact order.
///
/// Also check that all backlinks are valid and that the first element is the service head.
pub fn assert_ring<T: Config>(
	queues: &[<<T as Config>::MessageProcessor as ProcessMessage>::Origin],
) {
	let len = queues.len();
	for (i, origin) in queues.iter().enumerate() {
		// Both neighbours wrap around the ring.
		let expected = Neighbours {
			prev: queues[(i + len - 1) % len].clone(),
			next: queues[(i + 1) % len].clone(),
		};
		let book = BookStateFor::<T>::get(origin);
		assert_eq!(book.ready_neighbours, Some(expected));
	}
	assert_eq!(ServiceHead::<T>::get(), queues.first().cloned());
}
File diff suppressed because it is too large Load Diff
+367
View File
@@ -0,0 +1,367 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Autogenerated weights for `pallet_message_queue`
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
//! HOSTNAME: `4563561839a5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024`
// Executed Command:
// frame-omni-bencher
// v1
// benchmark
// pallet
// --extrinsic=*
// --runtime=target/production/wbuild/kitchensink-runtime/kitchensink_runtime.wasm
// --pallet=pallet_message_queue
// --header=/__w/pezkuwi-sdk/pezkuwi-sdk/substrate/HEADER-APACHE2
// --output=/__w/pezkuwi-sdk/pezkuwi-sdk/substrate/frame/message-queue/src/weights.rs
// --wasm-execution=compiled
// --steps=50
// --repeat=20
// --heap-pages=4096
// --template=substrate/.maintain/frame-weight-template.hbs
// --no-storage-info
// --no-min-squares
// --no-median-slopes
// --genesis-builder-policy=none
// --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic,pallet_nomination_pools,pallet_remark,pallet_transaction_storage,pallet_election_provider_multi_block,pallet_election_provider_multi_block::signed,pallet_election_provider_multi_block::unsigned,pallet_election_provider_multi_block::verifier
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
#![allow(unused_imports)]
#![allow(missing_docs)]
#![allow(dead_code)]
use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
use core::marker::PhantomData;
/// Weight functions needed for `pallet_message_queue`.
///
/// Implemented by the benchmarked [`SubstrateWeight`], the unit type (for tests) and the
/// mocked weight info in `mock.rs`.
pub trait WeightInfo {
	fn ready_ring_knit() -> Weight;
	fn ready_ring_unknit() -> Weight;
	fn service_queue_base() -> Weight;
	fn service_page_base_completion() -> Weight;
	fn service_page_base_no_completion() -> Weight;
	fn service_page_item() -> Weight;
	fn bump_service_head() -> Weight;
	fn set_service_head() -> Weight;
	fn reap_page() -> Weight;
	fn execute_overweight_page_removed() -> Weight;
	fn execute_overweight_page_updated() -> Weight;
}
/// Weights for `pallet_message_queue` using the Substrate node and recommended hardware.
// NOTE(review): all constants below are auto-generated by the benchmarking CLI (see the file
// header); re-run the benchmarks to update them instead of editing by hand.
pub struct SubstrateWeight<T>(PhantomData<T>);
impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
	/// Storage: `MessageQueue::ServiceHead` (r:1 w:0)
	/// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
	/// Storage: `MessageQueue::BookStateFor` (r:2 w:2)
	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
	fn ready_ring_knit() -> Weight {
		// Proof Size summary in bytes:
		// Measured: `209`
		// Estimated: `6038`
		// Minimum execution time: 12_258_000 picoseconds.
		Weight::from_parts(12_505_000, 6038)
			.saturating_add(T::DbWeight::get().reads(3_u64))
			.saturating_add(T::DbWeight::get().writes(2_u64))
	}
	/// Storage: `MessageQueue::BookStateFor` (r:2 w:2)
	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
	/// Storage: `MessageQueue::ServiceHead` (r:1 w:1)
	/// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
	fn ready_ring_unknit() -> Weight {
		// Proof Size summary in bytes:
		// Measured: `209`
		// Estimated: `6038`
		// Minimum execution time: 11_102_000 picoseconds.
		Weight::from_parts(11_403_000, 6038)
			.saturating_add(T::DbWeight::get().reads(3_u64))
			.saturating_add(T::DbWeight::get().writes(3_u64))
	}
	/// Storage: `MessageQueue::BookStateFor` (r:1 w:1)
	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
	fn service_queue_base() -> Weight {
		// Proof Size summary in bytes:
		// Measured: `0`
		// Estimated: `3514`
		// Minimum execution time: 2_413_000 picoseconds.
		Weight::from_parts(2_535_000, 3514)
			.saturating_add(T::DbWeight::get().reads(1_u64))
			.saturating_add(T::DbWeight::get().writes(1_u64))
	}
	/// Storage: `MessageQueue::Pages` (r:1 w:1)
	/// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`)
	fn service_page_base_completion() -> Weight {
		// Proof Size summary in bytes:
		// Measured: `50`
		// Estimated: `69049`
		// Minimum execution time: 4_484_000 picoseconds.
		Weight::from_parts(4_692_000, 69049)
			.saturating_add(T::DbWeight::get().reads(1_u64))
			.saturating_add(T::DbWeight::get().writes(1_u64))
	}
	/// Storage: `MessageQueue::Pages` (r:1 w:1)
	/// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`)
	fn service_page_base_no_completion() -> Weight {
		// Proof Size summary in bytes:
		// Measured: `50`
		// Estimated: `69049`
		// Minimum execution time: 4_546_000 picoseconds.
		Weight::from_parts(4_713_000, 69049)
			.saturating_add(T::DbWeight::get().reads(1_u64))
			.saturating_add(T::DbWeight::get().writes(1_u64))
	}
	/// Storage: `MessageQueue::BookStateFor` (r:0 w:1)
	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
	/// Storage: `MessageQueue::Pages` (r:0 w:1)
	/// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`)
	fn service_page_item() -> Weight {
		// Proof Size summary in bytes:
		// Measured: `0`
		// Estimated: `0`
		// Minimum execution time: 169_004_000 picoseconds.
		Weight::from_parts(171_711_000, 0)
			.saturating_add(T::DbWeight::get().writes(2_u64))
	}
	/// Storage: `MessageQueue::ServiceHead` (r:1 w:1)
	/// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
	/// Storage: `MessageQueue::BookStateFor` (r:1 w:0)
	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
	fn bump_service_head() -> Weight {
		// Proof Size summary in bytes:
		// Measured: `154`
		// Estimated: `3514`
		// Minimum execution time: 6_878_000 picoseconds.
		Weight::from_parts(7_040_000, 3514)
			.saturating_add(T::DbWeight::get().reads(2_u64))
			.saturating_add(T::DbWeight::get().writes(1_u64))
	}
	/// Storage: `MessageQueue::BookStateFor` (r:1 w:0)
	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
	/// Storage: `MessageQueue::ServiceHead` (r:0 w:1)
	/// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
	fn set_service_head() -> Weight {
		// Proof Size summary in bytes:
		// Measured: `154`
		// Estimated: `3514`
		// Minimum execution time: 5_960_000 picoseconds.
		Weight::from_parts(6_145_000, 3514)
			.saturating_add(T::DbWeight::get().reads(1_u64))
			.saturating_add(T::DbWeight::get().writes(1_u64))
	}
	/// Storage: `MessageQueue::BookStateFor` (r:1 w:1)
	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
	/// Storage: `MessageQueue::Pages` (r:1 w:1)
	/// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`)
	fn reap_page() -> Weight {
		// Proof Size summary in bytes:
		// Measured: `65652`
		// Estimated: `69049`
		// Minimum execution time: 61_149_000 picoseconds.
		Weight::from_parts(61_831_000, 69049)
			.saturating_add(T::DbWeight::get().reads(2_u64))
			.saturating_add(T::DbWeight::get().writes(2_u64))
	}
	/// Storage: `MessageQueue::BookStateFor` (r:1 w:1)
	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
	/// Storage: `MessageQueue::Pages` (r:1 w:1)
	/// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`)
	fn execute_overweight_page_removed() -> Weight {
		// Proof Size summary in bytes:
		// Measured: `65652`
		// Estimated: `69049`
		// Minimum execution time: 78_851_000 picoseconds.
		Weight::from_parts(79_899_000, 69049)
			.saturating_add(T::DbWeight::get().reads(2_u64))
			.saturating_add(T::DbWeight::get().writes(2_u64))
	}
	/// Storage: `MessageQueue::BookStateFor` (r:1 w:1)
	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
	/// Storage: `MessageQueue::Pages` (r:1 w:1)
	/// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`)
	fn execute_overweight_page_updated() -> Weight {
		// Proof Size summary in bytes:
		// Measured: `65652`
		// Estimated: `69049`
		// Minimum execution time: 117_359_000 picoseconds.
		Weight::from_parts(118_679_000, 69049)
			.saturating_add(T::DbWeight::get().reads(2_u64))
			.saturating_add(T::DbWeight::get().writes(2_u64))
	}
}
// For backwards compatibility and tests.
// NOTE(review): mirrors `SubstrateWeight` but uses `RocksDbWeight`; the constants are
// auto-generated by the benchmarking CLI — re-run the benchmarks instead of editing by hand.
impl WeightInfo for () {
	/// Storage: `MessageQueue::ServiceHead` (r:1 w:0)
	/// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
	/// Storage: `MessageQueue::BookStateFor` (r:2 w:2)
	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
	fn ready_ring_knit() -> Weight {
		// Proof Size summary in bytes:
		// Measured: `209`
		// Estimated: `6038`
		// Minimum execution time: 12_258_000 picoseconds.
		Weight::from_parts(12_505_000, 6038)
			.saturating_add(RocksDbWeight::get().reads(3_u64))
			.saturating_add(RocksDbWeight::get().writes(2_u64))
	}
	/// Storage: `MessageQueue::BookStateFor` (r:2 w:2)
	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
	/// Storage: `MessageQueue::ServiceHead` (r:1 w:1)
	/// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
	fn ready_ring_unknit() -> Weight {
		// Proof Size summary in bytes:
		// Measured: `209`
		// Estimated: `6038`
		// Minimum execution time: 11_102_000 picoseconds.
		Weight::from_parts(11_403_000, 6038)
			.saturating_add(RocksDbWeight::get().reads(3_u64))
			.saturating_add(RocksDbWeight::get().writes(3_u64))
	}
	/// Storage: `MessageQueue::BookStateFor` (r:1 w:1)
	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
	fn service_queue_base() -> Weight {
		// Proof Size summary in bytes:
		// Measured: `0`
		// Estimated: `3514`
		// Minimum execution time: 2_413_000 picoseconds.
		Weight::from_parts(2_535_000, 3514)
			.saturating_add(RocksDbWeight::get().reads(1_u64))
			.saturating_add(RocksDbWeight::get().writes(1_u64))
	}
	/// Storage: `MessageQueue::Pages` (r:1 w:1)
	/// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`)
	fn service_page_base_completion() -> Weight {
		// Proof Size summary in bytes:
		// Measured: `50`
		// Estimated: `69049`
		// Minimum execution time: 4_484_000 picoseconds.
		Weight::from_parts(4_692_000, 69049)
			.saturating_add(RocksDbWeight::get().reads(1_u64))
			.saturating_add(RocksDbWeight::get().writes(1_u64))
	}
	/// Storage: `MessageQueue::Pages` (r:1 w:1)
	/// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`)
	fn service_page_base_no_completion() -> Weight {
		// Proof Size summary in bytes:
		// Measured: `50`
		// Estimated: `69049`
		// Minimum execution time: 4_546_000 picoseconds.
		Weight::from_parts(4_713_000, 69049)
			.saturating_add(RocksDbWeight::get().reads(1_u64))
			.saturating_add(RocksDbWeight::get().writes(1_u64))
	}
	/// Storage: `MessageQueue::BookStateFor` (r:0 w:1)
	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
	/// Storage: `MessageQueue::Pages` (r:0 w:1)
	/// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`)
	fn service_page_item() -> Weight {
		// Proof Size summary in bytes:
		// Measured: `0`
		// Estimated: `0`
		// Minimum execution time: 169_004_000 picoseconds.
		Weight::from_parts(171_711_000, 0)
			.saturating_add(RocksDbWeight::get().writes(2_u64))
	}
	/// Storage: `MessageQueue::ServiceHead` (r:1 w:1)
	/// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
	/// Storage: `MessageQueue::BookStateFor` (r:1 w:0)
	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
	fn bump_service_head() -> Weight {
		// Proof Size summary in bytes:
		// Measured: `154`
		// Estimated: `3514`
		// Minimum execution time: 6_878_000 picoseconds.
		Weight::from_parts(7_040_000, 3514)
			.saturating_add(RocksDbWeight::get().reads(2_u64))
			.saturating_add(RocksDbWeight::get().writes(1_u64))
	}
	/// Storage: `MessageQueue::BookStateFor` (r:1 w:0)
	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
	/// Storage: `MessageQueue::ServiceHead` (r:0 w:1)
	/// Proof: `MessageQueue::ServiceHead` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
	fn set_service_head() -> Weight {
		// Proof Size summary in bytes:
		// Measured: `154`
		// Estimated: `3514`
		// Minimum execution time: 5_960_000 picoseconds.
		Weight::from_parts(6_145_000, 3514)
			.saturating_add(RocksDbWeight::get().reads(1_u64))
			.saturating_add(RocksDbWeight::get().writes(1_u64))
	}
	/// Storage: `MessageQueue::BookStateFor` (r:1 w:1)
	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
	/// Storage: `MessageQueue::Pages` (r:1 w:1)
	/// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`)
	fn reap_page() -> Weight {
		// Proof Size summary in bytes:
		// Measured: `65652`
		// Estimated: `69049`
		// Minimum execution time: 61_149_000 picoseconds.
		Weight::from_parts(61_831_000, 69049)
			.saturating_add(RocksDbWeight::get().reads(2_u64))
			.saturating_add(RocksDbWeight::get().writes(2_u64))
	}
	/// Storage: `MessageQueue::BookStateFor` (r:1 w:1)
	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
	/// Storage: `MessageQueue::Pages` (r:1 w:1)
	/// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`)
	fn execute_overweight_page_removed() -> Weight {
		// Proof Size summary in bytes:
		// Measured: `65652`
		// Estimated: `69049`
		// Minimum execution time: 78_851_000 picoseconds.
		Weight::from_parts(79_899_000, 69049)
			.saturating_add(RocksDbWeight::get().reads(2_u64))
			.saturating_add(RocksDbWeight::get().writes(2_u64))
	}
	/// Storage: `MessageQueue::BookStateFor` (r:1 w:1)
	/// Proof: `MessageQueue::BookStateFor` (`max_values`: None, `max_size`: Some(49), added: 2524, mode: `MaxEncodedLen`)
	/// Storage: `MessageQueue::Pages` (r:1 w:1)
	/// Proof: `MessageQueue::Pages` (`max_values`: None, `max_size`: Some(65584), added: 68059, mode: `MaxEncodedLen`)
	fn execute_overweight_page_updated() -> Weight {
		// Proof Size summary in bytes:
		// Measured: `65652`
		// Estimated: `69049`
		// Minimum execution time: 117_359_000 picoseconds.
		Weight::from_parts(118_679_000, 69049)
			.saturating_add(RocksDbWeight::get().reads(2_u64))
			.saturating_add(RocksDbWeight::get().writes(2_u64))
	}
}