feat: initialize Kurdistan SDK - independent fork of Polkadot SDK

This commit is contained in:
2025-12-13 15:44:15 +03:00
commit e4778b4576
6838 changed files with 1847450 additions and 0 deletions
@@ -0,0 +1,82 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! Common traits and types used by the scheduler and assignment providers.
use scale_info::TypeInfo;
use sp_runtime::{
codec::{Decode, Encode},
RuntimeDebug,
};
use pezkuwi_primitives::{CoreIndex, Id as ParaId};
/// Assignment (ParaId -> CoreIndex).
///
/// Either an on-demand pool assignment (which records the core it was made for) or a bulk
/// assignment (which records only the para).
#[derive(Encode, Decode, TypeInfo, RuntimeDebug, Clone, PartialEq)]
pub enum Assignment {
	/// A pool assignment.
	Pool {
		/// The assigned para id.
		para_id: ParaId,
		/// The core index the para got assigned to.
		core_index: CoreIndex,
	},
	/// A bulk assignment.
	Bulk(ParaId),
}
impl Assignment {
	/// The [`ParaId`] this assignment is associated with, regardless of variant.
	pub fn para_id(&self) -> ParaId {
		// Both variants carry exactly one `ParaId`; bind it with a single or-pattern.
		match self {
			Self::Pool { para_id, .. } | Self::Bulk(para_id) => *para_id,
		}
	}
}
/// A source of [`Assignment`]s consumed by the scheduler.
///
/// Assignments come into existence via [`Self::pop_assignment_for_core`] and their life ends
/// either via [`Self::report_processed`] or [`Self::push_back_assignment`].
pub trait AssignmentProvider<BlockNumber> {
	/// Pops an [`Assignment`] from the provider for a specified [`CoreIndex`].
	///
	/// This is where assignments come into existence.
	fn pop_assignment_for_core(core_idx: CoreIndex) -> Option<Assignment>;
	/// A previously popped `Assignment` has been fully processed.
	///
	/// Report back to the assignment provider that an assignment is done and no longer present in
	/// the scheduler.
	///
	/// This is one way of the life of an assignment coming to an end.
	fn report_processed(assignment: Assignment);
	/// Push back a previously popped assignment.
	///
	/// If the assignment could not be processed within the current session, it can be pushed back
	/// to the assignment provider in order to be popped again later.
	///
	/// This is the second way the life of an assignment can come to an end.
	fn push_back_assignment(assignment: Assignment);
	/// Push some assignment for mocking/benchmarks purposes.
	///
	/// Useful for benchmarks and testing. The returned assignment is "valid" and can if need be
	/// passed into `report_processed` for example.
	#[cfg(any(feature = "runtime-benchmarks", test))]
	fn get_mock_assignment(core_idx: CoreIndex, para_id: ParaId) -> Assignment;
	/// Report that an assignment was duplicated by the scheduler.
	fn assignment_duplicated(assignment: &Assignment);
}
@@ -0,0 +1,494 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
//! A module that is responsible for migration of storage.
use super::*;
use alloc::vec::Vec;
use frame_support::{
migrations::VersionedMigration, pallet_prelude::ValueQuery, storage_alias,
traits::UncheckedOnRuntimeUpgrade, weights::Weight,
};
/// Old/legacy assignment representation (v0).
///
/// `Assignment` used to be a concrete type with the same layout V0Assignment, identical on all
/// assignment providers. This can be removed once storage has been migrated.
#[derive(Encode, Decode, RuntimeDebug, TypeInfo, PartialEq, Clone)]
struct V0Assignment {
	/// The para this legacy assignment refers to — the only data v0 carried.
	pub para_id: ParaId,
}
/// Old scheduler with explicit parathreads and `Scheduled` storage instead of `ClaimQueue`.
mod v0 {
	use super::*;
	use pezkuwi_primitives::{CollatorId, Id};
	/// The v0 schedule: a flat list of core assignments.
	#[storage_alias]
	pub(super) type Scheduled<T: Config> = StorageValue<Pallet<T>, Vec<CoreAssignment>, ValueQuery>;
	/// A claim on a parathread core: the para id and the collator allowed to provide the block.
	#[derive(Clone, Encode, Decode)]
	#[cfg_attr(feature = "std", derive(PartialEq))]
	pub struct ParathreadClaim(pub Id, pub CollatorId);
	#[derive(Clone, Encode, Decode)]
	#[cfg_attr(feature = "std", derive(PartialEq))]
	pub struct ParathreadEntry {
		/// The claim.
		pub claim: ParathreadClaim,
		/// Number of retries.
		pub retries: u32,
	}
	/// What is occupying a specific availability core.
	#[derive(Clone, Encode, Decode)]
	#[cfg_attr(feature = "std", derive(PartialEq))]
	pub enum CoreOccupied {
		/// A parathread.
		Parathread(ParathreadEntry),
		/// A teyrchain.
		Teyrchain,
	}
	// NOTE(review): unlike the two aliases below, this type IS decoded during the v1 migration
	// (`v0::AvailabilityCores::take()` in `UncheckedMigrateToV1`), so its layout matters.
	#[storage_alias]
	pub(crate) type AvailabilityCores<T: Config> =
		StorageValue<Pallet<T>, Vec<Option<CoreOccupied>>, ValueQuery>;
	/// The actual type isn't important, as we only delete the key in the state.
	#[storage_alias]
	pub(super) type ParathreadQueue<T: Config> = StorageValue<Pallet<T>, (), ValueQuery>;
	/// The actual type isn't important, as we only delete the key in the state.
	#[storage_alias]
	pub(super) type ParathreadClaimIndex<T: Config> = StorageValue<Pallet<T>, (), ValueQuery>;
	/// The assignment type.
	#[derive(Clone, Encode, Decode, TypeInfo, RuntimeDebug)]
	#[cfg_attr(feature = "std", derive(PartialEq))]
	pub enum AssignmentKind {
		/// A teyrchain.
		Teyrchain,
		/// A parathread.
		Parathread(CollatorId, u32),
	}
	/// How a free core is scheduled to be assigned.
	#[derive(Clone, Encode, Decode, TypeInfo, RuntimeDebug)]
	#[cfg_attr(feature = "std", derive(PartialEq))]
	pub struct CoreAssignment {
		/// The core that is assigned.
		pub core: CoreIndex,
		/// The unique ID of the para that is assigned to the core.
		pub para_id: ParaId,
		/// The kind of the assignment.
		pub kind: AssignmentKind,
		/// The index of the validator group assigned to the core.
		pub group_idx: GroupIndex,
	}
}
// `ClaimQueue` got introduced.
//
// - Items are `Option` for some weird reason.
// - Assignments only consist of `ParaId`, `Assignment` is a concrete type (Same as V0Assignment).
mod v1 {
	use frame_support::{
		pallet_prelude::ValueQuery, storage_alias, traits::UncheckedOnRuntimeUpgrade,
		weights::Weight,
	};
	use frame_system::pallet_prelude::BlockNumberFor;
	use super::*;
	use crate::scheduler;
	/// v1 claim queue: per core, a queue of optional `ParasEntry`s.
	#[storage_alias]
	pub(super) type ClaimQueue<T: Config> = StorageValue<
		Pallet<T>,
		BTreeMap<CoreIndex, VecDeque<Option<ParasEntry<BlockNumberFor<T>>>>>,
		ValueQuery,
	>;
	/// v1 availability cores, occupancy tracked via `CoreOccupied`.
	#[storage_alias]
	pub(super) type AvailabilityCores<T: Config> =
		StorageValue<Pallet<T>, Vec<CoreOccupied<BlockNumberFor<T>>>, ValueQuery>;
	#[derive(Encode, Decode, TypeInfo, RuntimeDebug, PartialEq)]
	pub(super) enum CoreOccupied<N> {
		/// No candidate is waiting availability on this core right now (the core is not occupied).
		Free,
		/// A para is currently waiting for availability/inclusion on this core.
		Paras(ParasEntry<N>),
	}
	#[derive(Encode, Decode, TypeInfo, RuntimeDebug, PartialEq)]
	pub(super) struct ParasEntry<N> {
		/// The underlying `Assignment`
		pub(super) assignment: V0Assignment,
		/// The number of times the entry has timed out in availability already.
		pub(super) availability_timeouts: u32,
		/// The block height until this entry needs to be backed.
		///
		/// If missed the entry will be removed from the claim queue without ever having occupied
		/// the core.
		pub(super) ttl: N,
	}
	impl<N> ParasEntry<N> {
		/// Create a new `ParasEntry` with zero timeouts and `ttl` set to `now`.
		pub(super) fn new(assignment: V0Assignment, now: N) -> Self {
			ParasEntry { assignment, availability_timeouts: 0, ttl: now }
		}
		/// Return `Id` from the underlying `Assignment`.
		pub(super) fn para_id(&self) -> ParaId {
			self.assignment.para_id
		}
	}
	/// Append `pe` to the back of core `core_idx`'s claim queue, creating the queue if absent.
	fn add_to_claimqueue<T: Config>(core_idx: CoreIndex, pe: ParasEntry<BlockNumberFor<T>>) {
		ClaimQueue::<T>::mutate(|la| {
			la.entry(core_idx).or_default().push_back(Some(pe));
		});
	}
	/// Migration to V1
	pub struct UncheckedMigrateToV1<T>(core::marker::PhantomData<T>);
	impl<T: Config> UncheckedOnRuntimeUpgrade for UncheckedMigrateToV1<T> {
		fn on_runtime_upgrade() -> Weight {
			let mut weight: Weight = Weight::zero();
			// v0 parathread queue/claim index are dropped wholesale; only the keys are removed.
			v0::ParathreadQueue::<T>::kill();
			v0::ParathreadClaimIndex::<T>::kill();
			let now = frame_system::Pallet::<T>::block_number();
			// Move every v0 `Scheduled` item into the new per-core `ClaimQueue`.
			let scheduled = v0::Scheduled::<T>::take();
			let sched_len = scheduled.len() as u64;
			for core_assignment in scheduled {
				let core_idx = core_assignment.core;
				let assignment = V0Assignment { para_id: core_assignment.para_id };
				let pe = v1::ParasEntry::new(assignment, now);
				v1::add_to_claimqueue::<T>(core_idx, pe);
			}
			// Re-encode occupied availability cores into the v1 representation.
			let teyrchains = paras::Teyrchains::<T>::get();
			let availability_cores = v0::AvailabilityCores::<T>::take();
			let mut new_availability_cores = Vec::new();
			for (core_index, core) in availability_cores.into_iter().enumerate() {
				let new_core = if let Some(core) = core {
					match core {
						// NOTE(review): assumes cores [0..teyrchains.len()) map 1:1 onto the
						// `Teyrchains` ordering — confirm against the v0 scheduler invariants.
						v0::CoreOccupied::Teyrchain =>
							v1::CoreOccupied::Paras(v1::ParasEntry::new(
								V0Assignment { para_id: teyrchains[core_index] },
								now,
							)),
						v0::CoreOccupied::Parathread(entry) => v1::CoreOccupied::Paras(
							v1::ParasEntry::new(V0Assignment { para_id: entry.claim.0 }, now),
						),
					}
				} else {
					v1::CoreOccupied::Free
				};
				new_availability_cores.push(new_core);
			}
			v1::AvailabilityCores::<T>::set(new_availability_cores);
			// 2x as once for Scheduled and once for Claimqueue
			weight.saturating_accrue(T::DbWeight::get().reads_writes(2 * sched_len, 2 * sched_len));
			// reading teyrchains + availability_cores, writing AvailabilityCores
			weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 1));
			// 2x kill
			weight.saturating_accrue(T::DbWeight::get().writes(2));
			log::info!(target: scheduler::LOG_TARGET, "Migrated para scheduler storage to v1");
			weight
		}
		#[cfg(feature = "try-runtime")]
		fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::DispatchError> {
			// Record (scheduled items + occupied cores) so post_upgrade can verify nothing was
			// lost.
			let n: u32 = v0::Scheduled::<T>::get().len() as u32 +
				v0::AvailabilityCores::<T>::get().iter().filter(|c| c.is_some()).count() as u32;
			log::info!(
				target: crate::scheduler::LOG_TARGET,
				"Number of scheduled and waiting for availability before: {n}",
			);
			Ok(n.encode())
		}
		#[cfg(feature = "try-runtime")]
		fn post_upgrade(state: Vec<u8>) -> Result<(), sp_runtime::DispatchError> {
			log::info!(target: crate::scheduler::LOG_TARGET, "Running post_upgrade()");
			ensure!(
				v0::Scheduled::<T>::get().is_empty(),
				"Scheduled should be empty after the migration"
			);
			let expected_len = u32::decode(&mut &state[..]).unwrap();
			let availability_cores_waiting = v1::AvailabilityCores::<T>::get()
				.into_iter()
				.filter(|c| !matches!(c, v1::CoreOccupied::Free))
				.count();
			ensure!(
				Pallet::<T>::claim_queue_len() as u32 + availability_cores_waiting as u32 ==
					expected_len,
				"ClaimQueue and AvailabilityCores should have the correct length",
			);
			Ok(())
		}
	}
}
/// Migrate `V0` to `V1` of the storage format.
///
/// Wrapped in `VersionedMigration` so the inner migration only executes when the on-chain
/// storage version is 0, and bumps it to 1 afterwards.
pub type MigrateV0ToV1<T> = VersionedMigration<
	0,
	1,
	v1::UncheckedMigrateToV1<T>,
	Pallet<T>,
	<T as frame_system::Config>::DbWeight,
>;
/// v2: `Option` wrapper in the claim queue removed and the new enum-based [`Assignment`] adopted.
pub(crate) mod v2 {
	use super::*;
	use crate::scheduler;
	#[derive(Encode, Decode, TypeInfo, RuntimeDebug, PartialEq)]
	pub(crate) enum CoreOccupied<N> {
		// Core is idle.
		Free,
		// Core is occupied by the given entry.
		Paras(ParasEntry<N>),
	}
	#[derive(Encode, Decode, TypeInfo, RuntimeDebug, PartialEq)]
	pub(crate) struct ParasEntry<N> {
		// The underlying assignment (new, enum-based representation).
		pub assignment: Assignment,
		// How many times this entry already timed out in availability.
		pub availability_timeouts: u32,
		// Block height until which this entry needs to be backed.
		pub ttl: N,
	}
	// V2 (no Option wrapper) and new [`Assignment`].
	#[storage_alias]
	pub(crate) type ClaimQueue<T: Config> = StorageValue<
		Pallet<T>,
		BTreeMap<CoreIndex, VecDeque<ParasEntry<BlockNumberFor<T>>>>,
		ValueQuery,
	>;
	#[storage_alias]
	pub(crate) type AvailabilityCores<T: Config> =
		StorageValue<Pallet<T>, Vec<CoreOccupied<BlockNumberFor<T>>>, ValueQuery>;
	/// Whether `core_index` is a bulk core.
	///
	/// NOTE(review): assumes cores [0..Teyrchains::len()) are exactly the bulk cores — confirm
	/// that this invariant held at the time of the v2 migration.
	fn is_bulk<T: Config>(core_index: CoreIndex) -> bool {
		core_index.0 < paras::Teyrchains::<T>::decode_len().unwrap_or(0) as u32
	}
	/// Migration to V2
	pub struct UncheckedMigrateToV2<T>(core::marker::PhantomData<T>);
	impl<T: Config> UncheckedOnRuntimeUpgrade for UncheckedMigrateToV2<T> {
		fn on_runtime_upgrade() -> Weight {
			let mut weight: Weight = Weight::zero();
			// Re-encode the claim queue: drop the `Option` wrapper (discarding `None` entries via
			// `flatten`) and translate each para id into the new `Assignment` enum.
			let old = v1::ClaimQueue::<T>::take();
			let new = old
				.into_iter()
				.map(|(k, v)| {
					(
						k,
						v.into_iter()
							.flatten()
							.map(|p| {
								let assignment = if is_bulk::<T>(k) {
									Assignment::Bulk(p.para_id())
								} else {
									Assignment::Pool { para_id: p.para_id(), core_index: k }
								};
								ParasEntry {
									assignment,
									availability_timeouts: p.availability_timeouts,
									ttl: p.ttl,
								}
							})
							.collect::<VecDeque<_>>(),
					)
				})
				.collect::<BTreeMap<CoreIndex, VecDeque<ParasEntry<BlockNumberFor<T>>>>>();
			ClaimQueue::<T>::put(new);
			// Same translation for the occupied availability cores.
			let old = v1::AvailabilityCores::<T>::get();
			let new = old
				.into_iter()
				.enumerate()
				.map(|(k, a)| match a {
					v1::CoreOccupied::Free => CoreOccupied::Free,
					v1::CoreOccupied::Paras(paras) => {
						let assignment = if is_bulk::<T>((k as u32).into()) {
							Assignment::Bulk(paras.para_id())
						} else {
							Assignment::Pool {
								para_id: paras.para_id(),
								core_index: (k as u32).into(),
							}
						};
						CoreOccupied::Paras(ParasEntry {
							assignment,
							availability_timeouts: paras.availability_timeouts,
							ttl: paras.ttl,
						})
					},
				})
				.collect::<Vec<_>>();
			AvailabilityCores::<T>::put(new);
			// NOTE(review): this function performs 2 reads (ClaimQueue take, AvailabilityCores
			// get) and 3 writes (take + 2x put) but only accounts reads_writes(1, 1) — looks
			// under-accounted; confirm against the benchmarked upstream migration.
			weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1));
			log::info!(target: scheduler::LOG_TARGET, "Migrating para scheduler storage to v2");
			weight
		}
		#[cfg(feature = "try-runtime")]
		fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::DispatchError> {
			log::trace!(
				target: crate::scheduler::LOG_TARGET,
				"ClaimQueue before migration: {}",
				v1::ClaimQueue::<T>::get().len()
			);
			// Persist the number of claim-queue cores so post_upgrade can compare.
			let bytes = u32::to_be_bytes(v1::ClaimQueue::<T>::get().len() as u32);
			Ok(bytes.to_vec())
		}
		#[cfg(feature = "try-runtime")]
		fn post_upgrade(state: Vec<u8>) -> Result<(), sp_runtime::DispatchError> {
			log::trace!(target: crate::scheduler::LOG_TARGET, "Running post_upgrade()");
			let old_len = u32::from_be_bytes(state.try_into().unwrap());
			ensure!(
				v2::ClaimQueue::<T>::get().len() as u32 == old_len,
				"Old ClaimQueue completely moved to new ClaimQueue after migration"
			);
			Ok(())
		}
	}
}
/// Migrate `V1` to `V2` of the storage format.
///
/// Wrapped in `VersionedMigration`: runs only at on-chain storage version 1, bumps it to 2.
pub type MigrateV1ToV2<T> = VersionedMigration<
	1,
	2,
	v2::UncheckedMigrateToV2<T>,
	Pallet<T>,
	<T as frame_system::Config>::DbWeight,
>;
/// Migration for TTL and availability timeout retries removal.
/// AvailabilityCores storage is removed and ClaimQueue now holds `Assignment`s instead of
/// `ParasEntryType`
mod v3 {
	use super::*;
	use crate::scheduler;
	/// v3 claim queue: plain `Assignment`s, no TTL / availability-timeout bookkeeping.
	#[storage_alias]
	pub(crate) type ClaimQueue<T: Config> =
		StorageValue<Pallet<T>, BTreeMap<CoreIndex, VecDeque<Assignment>>, ValueQuery>;
	/// Migration to V3
	pub struct UncheckedMigrateToV3<T>(core::marker::PhantomData<T>);
	impl<T: Config> UncheckedOnRuntimeUpgrade for UncheckedMigrateToV3<T> {
		fn on_runtime_upgrade() -> Weight {
			let mut weight: Weight = Weight::zero();
			// Migrate ClaimQueue to the new format: keep only the assignment of each entry.
			let old = v2::ClaimQueue::<T>::take();
			let new = old
				.into_iter()
				.map(|(k, v)| {
					(
						k,
						v.into_iter()
							.map(|paras_entry| paras_entry.assignment)
							.collect::<VecDeque<_>>(),
					)
				})
				.collect::<BTreeMap<CoreIndex, VecDeque<Assignment>>>();
			v3::ClaimQueue::<T>::put(new);
			// Clear AvailabilityCores storage
			v2::AvailabilityCores::<T>::kill();
			weight.saturating_accrue(T::DbWeight::get().reads_writes(2, 2));
			log::info!(target: scheduler::LOG_TARGET, "Migrating para scheduler storage to v3");
			weight
		}
		#[cfg(feature = "try-runtime")]
		fn pre_upgrade() -> Result<Vec<u8>, sp_runtime::DispatchError> {
			log::trace!(
				target: crate::scheduler::LOG_TARGET,
				"ClaimQueue before migration: {}",
				v2::ClaimQueue::<T>::get().len()
			);
			// Persist the number of claim-queue cores so post_upgrade can compare.
			let bytes = u32::to_be_bytes(v2::ClaimQueue::<T>::get().len() as u32);
			Ok(bytes.to_vec())
		}
		#[cfg(feature = "try-runtime")]
		fn post_upgrade(state: Vec<u8>) -> Result<(), sp_runtime::DispatchError> {
			log::trace!(target: crate::scheduler::LOG_TARGET, "Running post_upgrade()");
			let old_len = u32::from_be_bytes(state.try_into().unwrap());
			ensure!(
				v3::ClaimQueue::<T>::get().len() as u32 == old_len,
				"Old ClaimQueue completely moved to new ClaimQueue after migration"
			);
			ensure!(
				!v2::AvailabilityCores::<T>::exists(),
				"AvailabilityCores storage should have been completely killed"
			);
			Ok(())
		}
	}
}
/// Migrate `V2` to `V3` of the storage format.
///
/// Wrapped in `VersionedMigration`: runs only at on-chain storage version 2, bumps it to 3.
pub type MigrateV2ToV3<T> = VersionedMigration<
	2,
	3,
	v3::UncheckedMigrateToV3<T>,
	Pallet<T>,
	<T as frame_system::Config>::DbWeight,
>;
@@ -0,0 +1,847 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Pezkuwi.
// Pezkuwi is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Pezkuwi is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Pezkuwi. If not, see <http://www.gnu.org/licenses/>.
use super::*;
use alloc::collections::btree_map::BTreeMap;
use frame_support::assert_ok;
use pezkuwi_primitives::{BlockNumber, SchedulerParams, SessionIndex, ValidationCode, ValidatorId};
use sp_keyring::Sr25519Keyring;
use crate::{
configuration::HostConfiguration,
initializer::SessionChangeNotification,
mock::{
new_test_ext, Configuration, MockAssigner, MockGenesisConfig, Paras, ParasShared,
RuntimeOrigin, Scheduler, System, Test,
},
paras::{ParaGenesisArgs, ParaKind},
scheduler::{self, common::Assignment, ClaimQueue},
};
fn register_para(id: ParaId) {
let validation_code: ValidationCode = vec![1, 2, 3].into();
assert_ok!(Paras::schedule_para_initialize(
id,
ParaGenesisArgs {
genesis_head: Vec::new().into(),
validation_code: validation_code.clone(),
para_kind: ParaKind::Parathread, // This most closely mimics our test assigner
}
));
assert_ok!(Paras::add_trusted_validation_code(RuntimeOrigin::root(), validation_code));
}
/// Drive the test chain from the current height up to (and including) block `to`.
///
/// `new_session` is consulted with each upcoming block number; returning `Some(notification)`
/// triggers a session change for that block. The per-block call order below mirrors runtime
/// initialization/finalization and must not be reordered.
fn run_to_block(
	to: BlockNumber,
	new_session: impl Fn(BlockNumber) -> Option<SessionChangeNotification<BlockNumber>>,
) {
	while System::block_number() < to {
		let b = System::block_number();
		// Finalize the current block (reverse order of initialization).
		Scheduler::initializer_finalize();
		Paras::initializer_finalize(b);
		if let Some(mut notification) = new_session(b + 1) {
			// We will make every session change trigger an action queue. Normally this may require
			// 2 or more session changes.
			if notification.session_index == SessionIndex::default() {
				notification.session_index = ParasShared::scheduled_session();
			}
			Configuration::force_set_active_config(notification.new_config.clone());
			Paras::initializer_on_new_session(&notification);
			Scheduler::initializer_on_new_session(&notification);
		}
		System::on_finalize(b);
		// Initialize the next block.
		System::on_initialize(b + 1);
		System::set_block_number(b + 1);
		Paras::initializer_initialize(b + 1);
		Scheduler::initializer_initialize(b + 1);
		// Pop fresh assignments into the claim queue, as the runtime would each block.
		Scheduler::advance_claim_queue(&Default::default());
	}
}
/// The host configuration most tests in this module start from.
fn default_config() -> HostConfiguration<BlockNumber> {
	// `max_availability_timeouts` is deprecated but still part of the tested surface.
	#[allow(deprecated)]
	let scheduler_params = SchedulerParams {
		group_rotation_frequency: 10,
		paras_availability_period: 3,
		lookahead: 2,
		num_cores: 3,
		max_availability_timeouts: 1,
		..Default::default()
	};
	HostConfiguration {
		// This field does not affect anything that scheduler does. However, `HostConfiguration`
		// is still a subject to consistency test. It requires that
		// `minimum_validation_upgrade_delay` is greater than `chain_availability_period` and
		// `thread_availability_period`.
		minimum_validation_upgrade_delay: 6,
		scheduler_params,
		..Default::default()
	}
}
/// Build a `MockGenesisConfig` carrying the given host configuration.
fn genesis_config(config: &HostConfiguration<BlockNumber>) -> MockGenesisConfig {
	let configuration = crate::configuration::GenesisConfig { config: config.clone() };
	MockGenesisConfig { configuration, ..Default::default() }
}
/// Internal access to assignments at the top of the claim queue.
///
/// Cores with an empty queue are skipped.
fn next_assignments() -> impl Iterator<Item = (CoreIndex, Assignment)> {
	ClaimQueue::<Test>::get()
		.into_iter()
		.filter_map(|(core_idx, queue)| Some((core_idx, queue.front()?.clone())))
}
#[test]
fn session_change_shuffles_validators() {
	let mut config = default_config();
	// Need five cores for this test
	config.scheduler_params.num_cores = 5;
	let genesis_config = genesis_config(&config);
	new_test_ext(genesis_config).execute_with(|| {
		// No validator groups exist before the first session change.
		assert!(ValidatorGroups::<Test>::get().is_empty());
		run_to_block(1, |number| match number {
			1 => Some(SessionChangeNotification {
				new_config: config.clone(),
				validators: vec![
					ValidatorId::from(Sr25519Keyring::Alice.public()),
					ValidatorId::from(Sr25519Keyring::Bob.public()),
					ValidatorId::from(Sr25519Keyring::Charlie.public()),
					ValidatorId::from(Sr25519Keyring::Dave.public()),
					ValidatorId::from(Sr25519Keyring::Eve.public()),
					ValidatorId::from(Sr25519Keyring::Ferdie.public()),
					ValidatorId::from(Sr25519Keyring::One.public()),
				],
				random_seed: [99; 32],
				..Default::default()
			}),
			_ => None,
		});
		let groups = ValidatorGroups::<Test>::get();
		// 7 validators over 5 cores: one group per core.
		assert_eq!(groups.len(), 5);
		// first two groups have the overflow.
		for i in 0..2 {
			assert_eq!(groups[i].len(), 2);
		}
		for i in 2..5 {
			assert_eq!(groups[i].len(), 1);
		}
	});
}
#[test]
fn session_change_takes_only_max_per_core() {
	let config = {
		let mut config = default_config();
		// Simulate 2 cores between all usage types
		config.scheduler_params.num_cores = 2;
		config.scheduler_params.max_validators_per_core = Some(1);
		config
	};
	let genesis_config = genesis_config(&config);
	new_test_ext(genesis_config).execute_with(|| {
		run_to_block(1, |number| match number {
			1 => Some(SessionChangeNotification {
				new_config: config.clone(),
				validators: vec![
					ValidatorId::from(Sr25519Keyring::Alice.public()),
					ValidatorId::from(Sr25519Keyring::Bob.public()),
					ValidatorId::from(Sr25519Keyring::Charlie.public()),
					ValidatorId::from(Sr25519Keyring::Dave.public()),
					ValidatorId::from(Sr25519Keyring::Eve.public()),
					ValidatorId::from(Sr25519Keyring::Ferdie.public()),
					ValidatorId::from(Sr25519Keyring::One.public()),
				],
				random_seed: [99; 32],
				..Default::default()
			}),
			_ => None,
		});
		let groups = ValidatorGroups::<Test>::get();
		// With `max_validators_per_core = 1`, group count follows the validator count, not cores.
		assert_eq!(groups.len(), 7);
		// Every validator gets its own group, even though there are 2 cores.
		for i in 0..7 {
			assert_eq!(groups[i].len(), 1);
		}
	});
}
#[test]
// Test that `advance_claim_queue` doubles the first assignment only for a core that didn't use to
// have any assignments.
fn advance_claim_queue_doubles_assignment_only_if_empty() {
	let mut config = default_config();
	config.scheduler_params.lookahead = 3;
	config.scheduler_params.num_cores = 2;
	let genesis_config = genesis_config(&config);
	let para_a = ParaId::from(3_u32);
	let para_b = ParaId::from(4_u32);
	let para_c = ParaId::from(5_u32);
	let assignment_a = Assignment::Bulk(para_a);
	let assignment_b = Assignment::Bulk(para_b);
	let assignment_c = Assignment::Bulk(para_c);
	new_test_ext(genesis_config).execute_with(|| {
		// Add 3 paras
		register_para(para_a);
		register_para(para_b);
		register_para(para_c);
		// start a new session to activate, 2 validators for 2 cores.
		run_to_block(1, |number| match number {
			1 => Some(SessionChangeNotification {
				new_config: config.clone(),
				validators: vec![
					ValidatorId::from(Sr25519Keyring::Alice.public()),
					ValidatorId::from(Sr25519Keyring::Bob.public()),
				],
				..Default::default()
			}),
			_ => None,
		});
		// add some para assignments.
		MockAssigner::add_test_assignment(assignment_a.clone());
		MockAssigner::add_test_assignment(assignment_b.clone());
		MockAssigner::add_test_assignment(assignment_c.clone());
		// This will call advance_claim_queue
		run_to_block(2, |_| None);
		{
			// 2 cores x (lookahead 3 minus 1, see doubling note below) = 5 total claims.
			assert_eq!(Scheduler::claim_queue_len(), 5);
			let mut claim_queue = scheduler::ClaimQueue::<Test>::get();
			// Because the claim queue used to be empty, the first assignment is doubled for every
			// core so that the first para gets a fair shot at backing something.
			assert_eq!(
				claim_queue.remove(&CoreIndex(0)).unwrap(),
				[assignment_a.clone(), assignment_a, assignment_b]
					.into_iter()
					.collect::<VecDeque<_>>()
			);
			assert_eq!(
				claim_queue.remove(&CoreIndex(1)).unwrap(),
				[assignment_c.clone(), assignment_c].into_iter().collect::<VecDeque<_>>()
			);
		}
	});
}
#[test]
// Test that `advance_claim_queue` doesn't populate for cores which have no assignments.
fn advance_claim_queue_no_entry_if_empty() {
	let mut config = default_config();
	config.scheduler_params.lookahead = 3;
	config.scheduler_params.num_cores = 2;
	let genesis_config = genesis_config(&config);
	let para_a = ParaId::from(3_u32);
	let assignment_a = Assignment::Bulk(para_a);
	new_test_ext(genesis_config).execute_with(|| {
		// Add 1 para
		register_para(para_a);
		// start a new session to activate, 2 validators for 2 cores.
		run_to_block(1, |number| match number {
			1 => Some(SessionChangeNotification {
				new_config: config.clone(),
				validators: vec![
					ValidatorId::from(Sr25519Keyring::Alice.public()),
					ValidatorId::from(Sr25519Keyring::Bob.public()),
				],
				..Default::default()
			}),
			_ => None,
		});
		MockAssigner::add_test_assignment(assignment_a.clone());
		// This will call advance_claim_queue
		run_to_block(3, |_| None);
		{
			let mut claim_queue = scheduler::ClaimQueue::<Test>::get();
			// Core 0 holds the single assignment that was added.
			assert_eq!(
				claim_queue.remove(&CoreIndex(0)).unwrap(),
				[assignment_a].into_iter().collect::<VecDeque<_>>()
			);
			// Even though core 1 exists, there's no assignment for it so it's not present in the
			// claim queue.
			assert!(claim_queue.remove(&CoreIndex(1)).is_none());
		}
	});
}
#[test]
// Test that `advance_claim_queue` only advances for cores that are not part of the `except_for`
// set.
fn advance_claim_queue_except_for() {
	let mut config = default_config();
	// NOTE: This test expects on demand cores to each get slotted on to a different core
	// and not fill up the claimqueue of each core first.
	config.scheduler_params.lookahead = 1;
	config.scheduler_params.num_cores = 3;
	let genesis_config = genesis_config(&config);
	let para_a = ParaId::from(1_u32);
	let para_b = ParaId::from(2_u32);
	let para_c = ParaId::from(3_u32);
	let para_d = ParaId::from(4_u32);
	let para_e = ParaId::from(5_u32);
	let assignment_a = Assignment::Bulk(para_a);
	let assignment_b = Assignment::Bulk(para_b);
	let assignment_c = Assignment::Bulk(para_c);
	let assignment_d = Assignment::Bulk(para_d);
	let assignment_e = Assignment::Bulk(para_e);
	new_test_ext(genesis_config).execute_with(|| {
		// add 5 paras
		register_para(para_a);
		register_para(para_b);
		register_para(para_c);
		register_para(para_d);
		register_para(para_e);
		// start a new session to activate, 3 validators for 3 cores.
		run_to_block(1, |number| match number {
			1 => Some(SessionChangeNotification {
				new_config: config.clone(),
				validators: vec![
					ValidatorId::from(Sr25519Keyring::Alice.public()),
					ValidatorId::from(Sr25519Keyring::Bob.public()),
					ValidatorId::from(Sr25519Keyring::Charlie.public()),
				],
				..Default::default()
			}),
			_ => None,
		});
		// add a couple of para claims now that paras are live
		MockAssigner::add_test_assignment(assignment_a.clone());
		MockAssigner::add_test_assignment(assignment_c.clone());
		run_to_block(2, |_| None);
		// Advance with no exceptions: everything that was scheduled is consumed.
		Scheduler::advance_claim_queue(&Default::default());
		// Queues of all cores should be empty
		assert_eq!(Scheduler::claim_queue_len(), 0);
		MockAssigner::add_test_assignment(assignment_a.clone());
		MockAssigner::add_test_assignment(assignment_c.clone());
		MockAssigner::add_test_assignment(assignment_b.clone());
		MockAssigner::add_test_assignment(assignment_d.clone());
		MockAssigner::add_test_assignment(assignment_e.clone());
		run_to_block(3, |_| None);
		{
			let scheduled: BTreeMap<_, _> = next_assignments().collect();
			assert_eq!(scheduled.len(), 3);
			assert_eq!(scheduled.get(&CoreIndex(0)).unwrap(), &Assignment::Bulk(para_a));
			assert_eq!(scheduled.get(&CoreIndex(1)).unwrap(), &Assignment::Bulk(para_c));
			assert_eq!(scheduled.get(&CoreIndex(2)).unwrap(), &Assignment::Bulk(para_b));
		}
		// now note that cores 0 and 1 were freed (core 2 is in the `except_for` set).
		Scheduler::advance_claim_queue(&std::iter::once(CoreIndex(2)).collect());
		{
			let scheduled: BTreeMap<_, _> = next_assignments().collect();
			// 1 thing scheduled before, + 2 cores freed.
			assert_eq!(scheduled.len(), 3);
			assert_eq!(scheduled.get(&CoreIndex(0)).unwrap(), &Assignment::Bulk(para_d));
			assert_eq!(scheduled.get(&CoreIndex(1)).unwrap(), &Assignment::Bulk(para_e));
			assert_eq!(scheduled.get(&CoreIndex(2)).unwrap(), &Assignment::Bulk(para_b));
		}
	});
}
#[test]
fn schedule_rotates_groups() {
	let on_demand_cores = 2;
	let config = {
		let mut config = default_config();
		config.scheduler_params.lookahead = 1;
		config.scheduler_params.num_cores = on_demand_cores;
		config
	};
	let rotation_frequency = config.scheduler_params.group_rotation_frequency;
	let genesis_config = genesis_config(&config);
	let para_a = ParaId::from(1_u32);
	let para_b = ParaId::from(2_u32);
	new_test_ext(genesis_config).execute_with(|| {
		register_para(para_a);
		register_para(para_b);
		// start a new session to activate, 2 validators for 2 cores.
		run_to_block(1, |number| match number {
			1 => Some(SessionChangeNotification {
				new_config: config.clone(),
				validators: vec![
					ValidatorId::from(Sr25519Keyring::Alice.public()),
					ValidatorId::from(Sr25519Keyring::Eve.public()),
				],
				..Default::default()
			}),
			_ => None,
		});
		let session_start_block = scheduler::SessionStartBlock::<Test>::get();
		assert_eq!(session_start_block, 1);
		let mut now = 2;
		run_to_block(now, |_| None);
		// Group for core i after r rotations is (i + r) mod num_cores.
		let assert_groups_rotated = |rotations: u32, now: &BlockNumberFor<Test>| {
			assert_eq!(
				Scheduler::group_assigned_to_core(CoreIndex(0), *now).unwrap(),
				GroupIndex((0u32 + rotations) % on_demand_cores)
			);
			assert_eq!(
				Scheduler::group_assigned_to_core(CoreIndex(1), *now).unwrap(),
				GroupIndex((1u32 + rotations) % on_demand_cores)
			);
		};
		assert_groups_rotated(0, &now);
		// one block before first rotation.
		now = rotation_frequency;
		run_to_block(now, |_| None);
		assert_groups_rotated(0, &now);
		// first rotation.
		now = now + 1;
		run_to_block(now, |_| None);
		assert_groups_rotated(1, &now);
		// one block before second rotation.
		now = rotation_frequency * 2;
		run_to_block(now, |_| None);
		assert_groups_rotated(1, &now);
		// second rotation.
		now = now + 1;
		run_to_block(now, |_| None);
		assert_groups_rotated(2, &now);
	});
}
#[test]
fn availability_predicate_works() {
	let genesis_config = genesis_config(&default_config());
	let SchedulerParams { group_rotation_frequency, paras_availability_period, .. } =
		default_config().scheduler_params;
	new_test_ext(genesis_config).execute_with(|| {
		// Before a full rotation has happened, no timeout check is required.
		run_to_block(1 + paras_availability_period, |_| None);
		assert!(!Scheduler::availability_timeout_check_required());
		run_to_block(1 + group_rotation_frequency, |_| None);
		{
			let now = System::block_number();
			assert!(Scheduler::availability_timeout_check_required());
			let pred = Scheduler::availability_timeout_predicate();
			let last_rotation = Scheduler::group_rotation_info(now).last_rotation_at();
			// Anything backed `paras_availability_period` or more blocks ago is timed out;
			// anything at or after the last rotation is not.
			let would_be_timed_out = now - paras_availability_period;
			let should_not_be_timed_out = last_rotation;
			assert!(pred(would_be_timed_out).timed_out);
			assert!(!pred(should_not_be_timed_out).timed_out);
			assert!(!pred(now).timed_out);
			// check the threshold is exact.
			assert!(!pred(would_be_timed_out + 1).timed_out);
		}
	});
}
#[test]
fn next_up_on_available_uses_next_scheduled_or_none() {
	let mut config = default_config();
	config.scheduler_params.num_cores = 1;
	let genesis_config = genesis_config(&config);
	let para_a = ParaId::from(1_u32);
	let para_b = ParaId::from(2_u32);
	new_test_ext(genesis_config).execute_with(|| {
		register_para(para_a);
		register_para(para_b);
		// start a new session to activate, 2 validators for 2 cores.
		run_to_block(1, |number| match number {
			1 => Some(SessionChangeNotification {
				new_config: config.clone(),
				validators: vec![
					ValidatorId::from(Sr25519Keyring::Alice.public()),
					ValidatorId::from(Sr25519Keyring::Eve.public()),
				],
				..Default::default()
			}),
			_ => None,
		});
		MockAssigner::add_test_assignment(Assignment::Bulk(para_a));
		run_to_block(2, |_| None);
		{
			// Two assignments for A on core 0, because the claim queue used to be empty.
			assert_eq!(Scheduler::claim_queue_len(), 2);
			// Core 1 is out of range with `num_cores = 1`, so there is nothing next up on it.
			assert!(Scheduler::next_up_on_available(CoreIndex(1)).is_none());
			assert_eq!(
				Scheduler::next_up_on_available(CoreIndex(0)).unwrap(),
				ScheduledCore { para_id: para_a, collator: None }
			);
			// Consuming one claim still leaves the doubled second claim for A …
			Scheduler::advance_claim_queue(&Default::default());
			assert_eq!(
				Scheduler::next_up_on_available(CoreIndex(0)).unwrap(),
				ScheduledCore { para_id: para_a, collator: None }
			);
			// … and consuming that empties the queue.
			Scheduler::advance_claim_queue(&Default::default());
			assert!(Scheduler::next_up_on_available(CoreIndex(0)).is_none());
		}
	});
}
#[test]
fn session_change_increasing_number_of_cores() {
	let mut config = default_config();
	config.scheduler_params.num_cores = 2;
	let genesis_config = genesis_config(&config);
	let para_a = ParaId::from(3_u32);
	let para_b = ParaId::from(4_u32);
	let assignment_a = Assignment::Bulk(para_a);
	let assignment_b = Assignment::Bulk(para_b);

	new_test_ext(genesis_config).execute_with(|| {
		// Register the two paras.
		register_para(para_a);
		register_para(para_b);

		// Start a new session: 2 validators for the 2 cores.
		run_to_block(1, |number| {
			(number == 1).then(|| SessionChangeNotification {
				new_config: config.clone(),
				validators: vec![
					ValidatorId::from(Sr25519Keyring::Alice.public()),
					ValidatorId::from(Sr25519Keyring::Bob.public()),
				],
				..Default::default()
			})
		});

		MockAssigner::add_test_assignment(assignment_a.clone());
		MockAssigner::add_test_assignment(assignment_b.clone());

		// `run_to_block` ends with `advance_claim_queue`, filling both cores
		// up to the lookahead.
		run_to_block(2, |_| None);

		let mut claim_queue = scheduler::ClaimQueue::<Test>::get();
		assert_eq!(Scheduler::claim_queue_len(), 4);
		assert_eq!(
			claim_queue.remove(&CoreIndex(0)).unwrap(),
			VecDeque::from([assignment_a.clone(), assignment_a.clone()])
		);
		assert_eq!(
			claim_queue.remove(&CoreIndex(1)).unwrap(),
			VecDeque::from([assignment_b.clone(), assignment_b.clone()])
		);

		// Bump the number of cores from 2 to 4 at the next session change.
		let old_config = config;
		let mut new_config = old_config.clone();
		new_config.scheduler_params.num_cores = 4;

		// Queue one more assignment for para B.
		MockAssigner::add_test_assignment(assignment_b.clone());

		run_to_block(3, |number| {
			(number == 3).then(|| SessionChangeNotification {
				new_config: new_config.clone(),
				prev_config: old_config.clone(),
				validators: vec![
					ValidatorId::from(Sr25519Keyring::Alice.public()),
					ValidatorId::from(Sr25519Keyring::Bob.public()),
					ValidatorId::from(Sr25519Keyring::Charlie.public()),
					ValidatorId::from(Sr25519Keyring::Dave.public()),
				],
				..Default::default()
			})
		});

		// The remaining claims get spread across the enlarged core set.
		let mut claim_queue = scheduler::ClaimQueue::<Test>::get();
		assert_eq!(Scheduler::claim_queue_len(), 3);
		assert_eq!(claim_queue.remove(&CoreIndex(0)).unwrap(), VecDeque::from([assignment_a]));
		assert_eq!(
			claim_queue.remove(&CoreIndex(1)).unwrap(),
			VecDeque::from([assignment_b.clone()])
		);
		assert_eq!(claim_queue.remove(&CoreIndex(2)).unwrap(), VecDeque::from([assignment_b]));
	});
}
#[test]
fn session_change_decreasing_number_of_cores() {
	let mut config = default_config();
	config.scheduler_params.num_cores = 3;
	let genesis_config = genesis_config(&config);
	let para_a = ParaId::from(3_u32);
	let para_b = ParaId::from(4_u32);
	let assignment_a = Assignment::Bulk(para_a);
	let assignment_b = Assignment::Bulk(para_b);

	new_test_ext(genesis_config).execute_with(|| {
		// Register the two paras.
		register_para(para_a);
		register_para(para_b);

		// Start a new session with 2 validators (3 cores configured).
		run_to_block(1, |number| {
			(number == 1).then(|| SessionChangeNotification {
				new_config: config.clone(),
				validators: vec![
					ValidatorId::from(Sr25519Keyring::Alice.public()),
					ValidatorId::from(Sr25519Keyring::Bob.public()),
				],
				..Default::default()
			})
		});

		// Hand-craft the claim queue: A once on core 0, a hole at core 1,
		// B twice on core 2.
		scheduler::Pallet::<Test>::set_claim_queue(BTreeMap::from([
			(CoreIndex::from(0), VecDeque::from([assignment_a.clone()])),
			(CoreIndex::from(2), VecDeque::from([assignment_b.clone(), assignment_b.clone()])),
		]));

		// Shrink to a single core at the next session change.
		let old_config = config;
		let mut new_config = old_config.clone();
		new_config.scheduler_params.num_cores = 1;

		// Assignment A already had its shot, so it is dropped for good, while
		// both claims of B are pushed back to the assignment provider.
		run_to_block(3, |number| {
			(number == 3).then(|| SessionChangeNotification {
				new_config: new_config.clone(),
				prev_config: old_config.clone(),
				validators: vec![ValidatorId::from(Sr25519Keyring::Alice.public())],
				..Default::default()
			})
		});

		let mut claim_queue = scheduler::ClaimQueue::<Test>::get();
		assert_eq!(Scheduler::claim_queue_len(), 1);
		// Only one claim of B remains, because `run_to_block` also calls
		// `advance_claim_queue` at the end.
		assert_eq!(
			claim_queue.remove(&CoreIndex(0)).unwrap(),
			VecDeque::from([assignment_b.clone()])
		);

		Scheduler::advance_claim_queue(&Default::default());
		// No more assignments now.
		assert_eq!(Scheduler::claim_queue_len(), 0);

		// Keep one core but remove all validator groups: the claim queue length
		// should be the minimum of the two, i.e. zero.
		MockAssigner::add_test_assignment(assignment_b);
		run_to_block(4, |number| {
			(number == 4).then(|| SessionChangeNotification {
				new_config: new_config.clone(),
				prev_config: new_config.clone(),
				validators: vec![],
				..Default::default()
			})
		});
		assert_eq!(Scheduler::claim_queue_len(), 0);
	});
}
#[test]
fn session_change_increasing_lookahead() {
	let mut config = default_config();
	config.scheduler_params.num_cores = 2;
	config.scheduler_params.lookahead = 2;
	let genesis_config = genesis_config(&config);
	let para_a = ParaId::from(3_u32);
	let para_b = ParaId::from(4_u32);
	let assignment_a = Assignment::Bulk(para_a);
	let assignment_b = Assignment::Bulk(para_b);

	new_test_ext(genesis_config).execute_with(|| {
		// Register the two paras.
		register_para(para_a);
		register_para(para_b);

		// Start a new session: 2 validators for the 2 cores.
		run_to_block(1, |number| {
			(number == 1).then(|| SessionChangeNotification {
				new_config: config.clone(),
				validators: vec![
					ValidatorId::from(Sr25519Keyring::Alice.public()),
					ValidatorId::from(Sr25519Keyring::Bob.public()),
				],
				..Default::default()
			})
		});

		// Queue three claims for A, then three for B (order matters).
		for _ in 0..3 {
			MockAssigner::add_test_assignment(assignment_a.clone());
		}
		for _ in 0..3 {
			MockAssigner::add_test_assignment(assignment_b.clone());
		}

		// With a lookahead of 2, only two claims fit per core.
		run_to_block(2, |_| None);

		let mut claim_queue = scheduler::ClaimQueue::<Test>::get();
		assert_eq!(Scheduler::claim_queue_len(), 4);
		assert_eq!(
			claim_queue.remove(&CoreIndex(0)).unwrap(),
			VecDeque::from([assignment_a.clone(), assignment_a.clone()])
		);
		assert_eq!(
			claim_queue.remove(&CoreIndex(1)).unwrap(),
			VecDeque::from([assignment_a.clone(), assignment_a.clone()])
		);

		// Raise the lookahead from 2 to 4 at the next session change.
		let old_config = config;
		let mut new_config = old_config.clone();
		new_config.scheduler_params.lookahead = 4;

		run_to_block(3, |number| {
			(number == 3).then(|| SessionChangeNotification {
				new_config: new_config.clone(),
				prev_config: old_config.clone(),
				validators: vec![
					ValidatorId::from(Sr25519Keyring::Alice.public()),
					ValidatorId::from(Sr25519Keyring::Bob.public()),
				],
				..Default::default()
			})
		});

		// The deeper queue now draws from the remaining assignments.
		let mut claim_queue = scheduler::ClaimQueue::<Test>::get();
		assert_eq!(Scheduler::claim_queue_len(), 6);
		assert_eq!(
			claim_queue.remove(&CoreIndex(0)).unwrap(),
			VecDeque::from([assignment_a.clone(), assignment_a.clone(), assignment_b.clone()])
		);
		assert_eq!(
			claim_queue.remove(&CoreIndex(1)).unwrap(),
			VecDeque::from([assignment_a, assignment_b.clone(), assignment_b])
		);
	});
}