feat: initialize Kurdistan SDK - independent fork of Polkadot SDK

This commit is contained in:
2025-12-13 15:44:15 +03:00
commit e4778b4576
6838 changed files with 1847450 additions and 0 deletions
+199
View File
@@ -0,0 +1,199 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::*;
use frame_support::defensive;
/// Controls validator disabling
pub trait DisablingStrategy<T: Config> {
	/// Make a disabling decision. Returning a [`DisablingDecision`]
	///
	/// # Arguments
	/// * `offender_stash` - the validator id of the offender.
	/// * `offender_slash_severity` - severity of the offence being reported.
	/// * `currently_disabled` - the currently disabled validators as
	///   `(index in the active set, severity they were disabled with)` pairs.
	fn decision(
		offender_stash: &T::ValidatorId,
		offender_slash_severity: OffenceSeverity,
		currently_disabled: &Vec<(u32, OffenceSeverity)>,
	) -> DisablingDecision;
}
/// Helper struct representing a decision coming from a given [`DisablingStrategy`] implementing
/// `decision`
///
/// `disable` is the index of the validator to disable,
/// `reenable` is the index of the validator to re-enable.
#[derive(Debug)]
pub struct DisablingDecision {
	/// Index (in the active validator set) of the validator to disable, if any.
	pub disable: Option<u32>,
	/// Index (in the active validator set) of the validator to re-enable, if any.
	pub reenable: Option<u32>,
}
/// The no-op strategy: never disables and never re-enables any validator.
impl<T: Config> DisablingStrategy<T> for () {
	fn decision(
		_offender_stash: &T::ValidatorId,
		_offender_slash_severity: OffenceSeverity,
		_currently_disabled: &Vec<(u32, OffenceSeverity)>,
	) -> DisablingDecision {
		// All inputs are deliberately ignored; no action is ever taken.
		DisablingDecision { disable: None, reenable: None }
	}
}
/// Calculate the disabling limit based on the number of validators and the disabling limit factor.
///
/// This is a sensible default implementation for the disabling limit factor for most disabling
/// strategies.
///
/// Disabling limit factor n=2 -> 1/n = 1/2 = 50% of validators can be disabled
fn factor_based_disable_limit(validators_len: usize, disabling_limit_factor: usize) -> usize {
	// `checked_div` only fails on a zero factor, which is a misconfiguration:
	// report it defensively and fall back to disabling nobody.
	match validators_len.saturating_sub(1).checked_div(disabling_limit_factor) {
		Some(limit) => limit,
		None => {
			defensive!("DISABLING_LIMIT_FACTOR should not be 0");
			0
		},
	}
}
/// Implementation of [`DisablingStrategy`] using factor_based_disable_limit which disables
/// validators from the active set up to a threshold. `DISABLING_LIMIT_FACTOR` is the factor of the
/// maximum disabled validators in the active set. E.g. setting this value to `3` means no more than
/// 1/3 of the validators in the active set can be disabled in an era.
///
/// By default a factor of 3 is used which is the byzantine threshold.
pub struct UpToLimitDisablingStrategy<const DISABLING_LIMIT_FACTOR: usize = 3>;
impl<const DISABLING_LIMIT_FACTOR: usize> UpToLimitDisablingStrategy<DISABLING_LIMIT_FACTOR> {
	/// Disabling limit calculated from the total number of validators in the active set. When
	/// reached no more validators will be disabled.
	///
	/// Computed as `(validators_len - 1) / DISABLING_LIMIT_FACTOR` (saturating at zero).
	pub fn disable_limit(validators_len: usize) -> usize {
		factor_based_disable_limit(validators_len, DISABLING_LIMIT_FACTOR)
	}
}
impl<T: Config, const DISABLING_LIMIT_FACTOR: usize> DisablingStrategy<T>
	for UpToLimitDisablingStrategy<DISABLING_LIMIT_FACTOR>
{
	/// Disable the offender if it is part of the active set and the disabling limit
	/// has not yet been reached. Never re-enables anyone.
	fn decision(
		offender_stash: &T::ValidatorId,
		_offender_slash_severity: OffenceSeverity,
		currently_disabled: &Vec<(u32, OffenceSeverity)>,
	) -> DisablingDecision {
		let active_set = Validators::<T>::get();
		let limit = Self::disable_limit(active_set.len());
		// Respect the disabling limit: once reached, no further validators get disabled.
		if currently_disabled.len() >= limit {
			log!(debug, "Won't disable: reached disabling limit {:?}", limit);
			return DisablingDecision { disable: None, reenable: None };
		}
		// Validators outside the active set cannot be disabled.
		let Some(position) = active_set.iter().position(|v| v == offender_stash) else {
			log!(debug, "Won't disable: offender not in active set",);
			return DisablingDecision { disable: None, reenable: None };
		};
		let offender_idx = position as u32;
		log!(debug, "Will disable {:?}", offender_idx);
		DisablingDecision { disable: Some(offender_idx), reenable: None }
	}
}
/// Implementation of [`DisablingStrategy`] which disables validators from the active set up to a
/// limit (factor_based_disable_limit) and if the limit is reached and the new offender is higher
/// (bigger punishment/severity) then it re-enables the lowest offender to free up space for the new
/// offender.
///
/// This strategy is not based on cumulative severity of offences but only on the severity of the
/// highest offence. Offender first committing a 25% offence and then a 50% offence will be treated
/// the same as an offender committing 50% offence.
///
/// An extension of [`UpToLimitDisablingStrategy`].
pub struct UpToLimitWithReEnablingDisablingStrategy<const DISABLING_LIMIT_FACTOR: usize = 3>;
impl<const DISABLING_LIMIT_FACTOR: usize>
	UpToLimitWithReEnablingDisablingStrategy<DISABLING_LIMIT_FACTOR>
{
	/// Disabling limit calculated from the total number of validators in the active set. When
	/// reached re-enabling logic might kick in.
	///
	/// Computed as `(validators_len - 1) / DISABLING_LIMIT_FACTOR` (saturating at zero).
	pub fn disable_limit(validators_len: usize) -> usize {
		factor_based_disable_limit(validators_len, DISABLING_LIMIT_FACTOR)
	}
}
impl<T: Config, const DISABLING_LIMIT_FACTOR: usize> DisablingStrategy<T>
	for UpToLimitWithReEnablingDisablingStrategy<DISABLING_LIMIT_FACTOR>
{
	/// Decide whether to disable `offender_stash`, and — when the disabling limit is
	/// already reached — whether a previously disabled validator with a lower (or
	/// equal) severity should be re-enabled to free up a slot.
	fn decision(
		offender_stash: &T::ValidatorId,
		offender_slash_severity: OffenceSeverity,
		currently_disabled: &Vec<(u32, OffenceSeverity)>,
	) -> DisablingDecision {
		let active_set = Validators::<T>::get();
		// We don't disable validators that are not in the active set
		let offender_idx = if let Some(idx) = active_set.iter().position(|i| i == offender_stash) {
			idx as u32
		} else {
			log!(debug, "Won't disable: offender not in active set",);
			return DisablingDecision { disable: None, reenable: None };
		};
		// Check if offender is already disabled
		if let Some((_, old_severity)) =
			currently_disabled.iter().find(|(idx, _)| *idx == offender_idx)
		{
			if offender_slash_severity > *old_severity {
				// Re-disabling with a strictly higher severity refreshes the stored severity.
				log!(debug, "Offender already disabled but with lower severity, will disable again to refresh severity of {:?}", offender_idx);
				return DisablingDecision { disable: Some(offender_idx), reenable: None };
			} else {
				log!(debug, "Offender already disabled with higher or equal severity");
				return DisablingDecision { disable: None, reenable: None };
			}
		}
		// We don't disable more than the limit (but we can re-enable a smaller offender to make
		// space)
		if currently_disabled.len() >= Self::disable_limit(active_set.len()) {
			log!(
				debug,
				"Reached disabling limit {:?}, checking for re-enabling",
				Self::disable_limit(active_set.len())
			);
			// Find the smallest offender to re-enable that is not higher than
			// offender_slash_severity
			if let Some((smallest_idx, _)) = currently_disabled
				.iter()
				.filter(|(_, severity)| *severity <= offender_slash_severity)
				.min_by_key(|(_, severity)| *severity)
			{
				log!(debug, "Will disable {:?} and re-enable {:?}", offender_idx, smallest_idx);
				return DisablingDecision {
					disable: Some(offender_idx),
					reenable: Some(*smallest_idx),
				};
			} else {
				// Every currently disabled validator carries a higher severity: keep them all.
				log!(debug, "No smaller offender found to re-enable");
				return DisablingDecision { disable: None, reenable: None };
			}
		} else {
			// If we are not at the limit, just disable the new offender and don't re-enable anyone
			log!(debug, "Will disable {:?}", offender_idx);
			return DisablingDecision { disable: Some(offender_idx), reenable: None };
		}
	}
}
@@ -0,0 +1,516 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! An opt-in utility for tracking historical sessions in FRAME-session.
//!
//! This is generally useful when implementing blockchains that require accountable
//! safety where validators from some amount f prior sessions must remain slashable.
//!
//! Rather than store the full session data for any given session, we instead commit
//! to the roots of merkle tries containing the session data.
//!
//! These roots and proofs of inclusion can be generated at any time during the current session.
//! Afterwards, the proofs can be fed to a consensus module when reporting misbehavior.
pub mod offchain;
pub mod onchain;
mod shared;
use alloc::vec::Vec;
use codec::{Decode, Encode};
use core::fmt::Debug;
use sp_runtime::{
traits::{Convert, OpaqueKeys},
KeyTypeId,
};
use sp_session::{MembershipProof, ValidatorCount};
use sp_staking::SessionIndex;
use sp_trie::{
trie_types::{TrieDBBuilder, TrieDBMutBuilderV0},
LayoutV0, MemoryDB, RandomState, Recorder, StorageProof, Trie, TrieMut, TrieRecorder,
};
use frame_support::{
print,
traits::{KeyOwnerProofSystem, ValidatorSet, ValidatorSetWithIdentification},
Parameter,
};
const LOG_TARGET: &'static str = "runtime::historical";
use crate::{self as pallet_session, Pallet as Session};
pub use pallet::*;
use sp_trie::{accessed_nodes_tracker::AccessedNodesTracker, recorder_ext::RecorderExt};
#[frame_support::pallet]
pub mod pallet {
	use super::*;
	use frame_support::pallet_prelude::*;
	/// The in-code storage version.
	const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);
	#[pallet::pallet]
	#[pallet::storage_version(STORAGE_VERSION)]
	pub struct Pallet<T>(_);
	/// Config necessary for the historical pallet.
	#[pallet::config]
	pub trait Config: pallet_session::Config + frame_system::Config {
		/// The overarching event type.
		#[allow(deprecated)]
		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
		/// Full identification of the validator.
		type FullIdentification: Parameter;
		/// A conversion from validator ID to full identification.
		///
		/// This should contain any references to economic actors associated with the
		/// validator, since they may be outdated by the time this is queried from a
		/// historical trie.
		///
		/// It must return the identification for the current session index.
		type FullIdentificationOf: Convert<Self::ValidatorId, Option<Self::FullIdentification>>;
	}
	/// Mapping from historical session indices to session-data root hash and validator count.
	#[pallet::storage]
	#[pallet::getter(fn historical_root)]
	pub type HistoricalSessions<T: Config> =
		StorageMap<_, Twox64Concat, SessionIndex, (T::Hash, ValidatorCount), OptionQuery>;
	/// The range of historical sessions we store. [first, last)
	#[pallet::storage]
	pub type StoredRange<T> = StorageValue<_, (SessionIndex, SessionIndex), OptionQuery>;
	/// Events emitted by this pallet.
	#[pallet::event]
	#[pallet::generate_deposit(pub(super) fn deposit_event)]
	pub enum Event<T> {
		/// The merkle root of the validators of the said session were stored
		RootStored { index: SessionIndex },
		/// The merkle roots of up to this session index were pruned
		RootsPruned { up_to: SessionIndex },
	}
}
impl<T: Config> Pallet<T> {
	/// Prune historical stored session roots up to (but not including)
	/// `up_to`.
	///
	/// Advances (or clears) `StoredRange` accordingly. Note a `RootsPruned` event is
	/// deposited unconditionally, even when the requested range turned out empty.
	pub fn prune_up_to(up_to: SessionIndex) {
		StoredRange::<T>::mutate(|range| {
			let (start, end) = match *range {
				Some(range) => range,
				None => return, // nothing to prune.
			};
			// Never prune past the end of what is actually stored.
			let up_to = core::cmp::min(up_to, end);
			if up_to < start {
				return; // out of bounds. harmless.
			}
			(start..up_to).for_each(HistoricalSessions::<T>::remove);
			let new_start = up_to;
			*range = if new_start == end {
				None // nothing is stored.
			} else {
				Some((new_start, end))
			}
		});
		Self::deposit_event(Event::<T>::RootsPruned { up_to });
	}
	/// The current session's validators paired with their full identification.
	///
	/// Validators for which `FullIdentificationOf` returns `None` are silently skipped.
	fn full_id_validators() -> Vec<(T::ValidatorId, T::FullIdentification)> {
		<Session<T>>::validators()
			.into_iter()
			.filter_map(|validator| {
				T::FullIdentificationOf::convert(validator.clone())
					.map(|full_id| (validator, full_id))
			})
			.collect::<Vec<_>>()
	}
}
/// Expose this pallet as a [`ValidatorSet`] by delegating to the underlying session pallet.
impl<T: Config> ValidatorSet<T::AccountId> for Pallet<T> {
	type ValidatorId = T::ValidatorId;
	type ValidatorIdOf = T::ValidatorIdOf;
	fn session_index() -> sp_staking::SessionIndex {
		super::Pallet::<T>::current_index()
	}
	fn validators() -> Vec<Self::ValidatorId> {
		super::Pallet::<T>::validators()
	}
}
/// Extend the validator set with the full identification configured for this pallet.
impl<T: Config> ValidatorSetWithIdentification<T::AccountId> for Pallet<T> {
	type Identification = T::FullIdentification;
	type IdentificationOf = T::FullIdentificationOf;
}
/// Specialization of the crate-level `SessionManager` which returns the set of full identification
/// when creating a new session.
pub trait SessionManager<ValidatorId, FullIdentification>:
	pallet_session::SessionManager<ValidatorId>
{
	/// If there was a validator set change, its returns the set of new validators along with their
	/// full identifications.
	fn new_session(new_index: SessionIndex) -> Option<Vec<(ValidatorId, FullIdentification)>>;
	/// Same as [`Self::new_session`] but invoked for the genesis session; defaults to
	/// delegating to `new_session`.
	fn new_session_genesis(
		new_index: SessionIndex,
	) -> Option<Vec<(ValidatorId, FullIdentification)>> {
		<Self as SessionManager<_, _>>::new_session(new_index)
	}
	/// Hook invoked when the session with `start_index` starts.
	fn start_session(start_index: SessionIndex);
	/// Hook invoked when the session with `end_index` ends.
	fn end_session(end_index: SessionIndex);
}
/// An `SessionManager` implementation that wraps an inner `I` and also
/// sets the historical trie root of the ending session.
pub struct NoteHistoricalRoot<T, I>(core::marker::PhantomData<(T, I)>);
impl<T: Config, I: SessionManager<T::ValidatorId, T::FullIdentification>> NoteHistoricalRoot<T, I> {
	/// Run the wrapped session manager for `new_index` and record the historical root.
	///
	/// * Extends `StoredRange` so it covers `new_index`.
	/// * On a validator-set change, builds a proving trie over the new set and stores
	///   its root plus the validator count in `HistoricalSessions`.
	/// * Without a change, carries the previous session's root forward (if any) so
	///   every session index stays provable.
	///
	/// Returns just the new validator ids, mirroring
	/// `pallet_session::SessionManager::new_session`.
	fn do_new_session(new_index: SessionIndex, is_genesis: bool) -> Option<Vec<T::ValidatorId>> {
		<StoredRange<T>>::mutate(|range| {
			range.get_or_insert_with(|| (new_index, new_index)).1 = new_index + 1;
		});
		let new_validators_and_id = if is_genesis {
			<I as SessionManager<_, _>>::new_session_genesis(new_index)
		} else {
			<I as SessionManager<_, _>>::new_session(new_index)
		};
		// Strip the full identifications for the return value expected by the session pallet.
		let new_validators_opt = new_validators_and_id
			.as_ref()
			.map(|new_validators| new_validators.iter().map(|(v, _id)| v.clone()).collect());
		if let Some(new_validators) = new_validators_and_id {
			let count = new_validators.len() as ValidatorCount;
			match ProvingTrie::<T>::generate_for(new_validators) {
				Ok(trie) => {
					<HistoricalSessions<T>>::insert(new_index, &(trie.root, count));
					Pallet::<T>::deposit_event(Event::RootStored { index: new_index });
				},
				Err(reason) => {
					// Trie generation failure is only logged; the session change proceeds.
					print("Failed to generate historical ancestry-inclusion proof.");
					print(reason);
				},
			};
		} else {
			// No validator-set change: reuse the previous session's root, if one exists.
			let previous_index = new_index.saturating_sub(1);
			if let Some(previous_session) = <HistoricalSessions<T>>::get(previous_index) {
				<HistoricalSessions<T>>::insert(new_index, previous_session);
				Pallet::<T>::deposit_event(Event::RootStored { index: new_index });
			}
		}
		new_validators_opt
	}
}
/// Delegate the session-manager callbacks to `I`, recording historical data on the way.
impl<T: Config, I> pallet_session::SessionManager<T::ValidatorId> for NoteHistoricalRoot<T, I>
where
	I: SessionManager<T::ValidatorId, T::FullIdentification>,
{
	fn new_session(new_index: SessionIndex) -> Option<Vec<T::ValidatorId>> {
		Self::do_new_session(new_index, false)
	}
	fn new_session_genesis(new_index: SessionIndex) -> Option<Vec<T::ValidatorId>> {
		Self::do_new_session(new_index, true)
	}
	fn start_session(start_index: SessionIndex) {
		<I as SessionManager<_, _>>::start_session(start_index)
	}
	fn end_session(end_index: SessionIndex) {
		// Persist the ending session's validator set for off-chain workers before
		// notifying the inner manager.
		onchain::store_session_validator_set_to_offchain::<T>(end_index);
		<I as SessionManager<_, _>>::end_session(end_index)
	}
}
/// A tuple of the validator's ID and their full identification.
pub type IdentificationTuple<T> =
	(<T as pallet_session::Config>::ValidatorId, <T as Config>::FullIdentification);
/// A trie instance for checking and generating proofs.
pub struct ProvingTrie<T: Config> {
	// Backing node database holding all trie nodes.
	db: MemoryDB<T::Hashing>,
	// Root hash of the trie stored in `db`.
	root: T::Hash,
}
impl<T: Config> ProvingTrie<T> {
	/// Build a trie committing to `validators`.
	///
	/// Two kinds of entries are inserted:
	/// * `(key_id, session key) -> validator index` for every session key of each
	///   validator (validators without registered session keys are skipped), and
	/// * `validator index -> (validator id, full identification)`.
	///
	/// Proving key ownership thus takes two lookups: key -> index, index -> identity.
	fn generate_for<I>(validators: I) -> Result<Self, &'static str>
	where
		I: IntoIterator<Item = (T::ValidatorId, T::FullIdentification)>,
	{
		let mut db = MemoryDB::with_hasher(RandomState::default());
		let mut root = Default::default();
		{
			let mut trie = TrieDBMutBuilderV0::new(&mut db, &mut root).build();
			for (i, (validator, full_id)) in validators.into_iter().enumerate() {
				let i = i as u32;
				let keys = match <Session<T>>::load_keys(&validator) {
					None => continue,
					Some(k) => k,
				};
				let id_tuple = (validator, full_id);
				// map each key to the owner index.
				for key_id in T::Keys::key_ids() {
					let key = keys.get_raw(*key_id);
					let res =
						(key_id, key).using_encoded(|k| i.using_encoded(|v| trie.insert(k, v)));
					res.map_err(|_| "failed to insert into trie")?;
				}
				// map each owner index to the full identification.
				i.using_encoded(|k| id_tuple.using_encoded(|v| trie.insert(k, v)))
					.map_err(|_| "failed to insert into trie")?;
			}
		}
		Ok(ProvingTrie { db, root })
	}
	/// Reconstruct a trie from a storage `proof` claimed to commit to `root`.
	///
	/// No validation happens here; later queries simply fail for nodes missing
	/// from the proof.
	fn from_proof(root: T::Hash, proof: StorageProof) -> Self {
		ProvingTrie { db: proof.into_memory_db(), root }
	}
	/// Prove the full verification data for a given key and key ID.
	pub fn prove(&self, key_id: KeyTypeId, key_data: &[u8]) -> Option<Vec<Vec<u8>>> {
		// Record every node touched while querying; those recorded nodes are the proof.
		let mut recorder = Recorder::<LayoutV0<T::Hashing>>::new();
		self.query(key_id, key_data, Some(&mut recorder));
		Some(recorder.into_raw_storage_proof())
	}
	/// Access the underlying trie root.
	pub fn root(&self) -> &T::Hash {
		&self.root
	}
	/// Search for a key inside the proof.
	///
	/// Performs the two-step lookup described on [`Self::generate_for`]: session key
	/// to validator index, then index to the identification tuple. Returns `None`
	/// when either lookup fails or the stored value does not decode.
	fn query(
		&self,
		key_id: KeyTypeId,
		key_data: &[u8],
		recorder: Option<&mut dyn TrieRecorder<T::Hash>>,
	) -> Option<IdentificationTuple<T>> {
		let trie = TrieDBBuilder::new(&self.db, &self.root)
			.with_optional_recorder(recorder)
			.build();
		let val_idx = (key_id, key_data)
			.using_encoded(|s| trie.get(s))
			.ok()?
			.and_then(|raw| u32::decode(&mut &*raw).ok())?;
		val_idx
			.using_encoded(|s| trie.get(s))
			.ok()?
			.and_then(|raw| <IdentificationTuple<T>>::decode(&mut &*raw).ok())
	}
}
impl<T: Config, D: AsRef<[u8]>> KeyOwnerProofSystem<(KeyTypeId, D)> for Pallet<T> {
	type Proof = MembershipProof;
	type IdentificationTuple = IdentificationTuple<T>;
	/// Prove key ownership for the *current* session by rebuilding the proving trie
	/// from the current validator set.
	fn prove(key: (KeyTypeId, D)) -> Option<Self::Proof> {
		let session = <Session<T>>::current_index();
		let validators = Self::full_id_validators();
		let count = validators.len() as ValidatorCount;
		let trie = ProvingTrie::<T>::generate_for(validators).ok()?;
		let (id, data) = key;
		trie.prove(id, data.as_ref()).map(|trie_nodes| MembershipProof {
			session,
			trie_nodes,
			validator_count: count,
		})
	}
	/// Check a membership proof against either the current session (trie rebuilt on
	/// the fly) or a stored historical session root. Rejects proofs with a mismatched
	/// validator count, duplicate nodes, or nodes unused by the lookup.
	fn check_proof(key: (KeyTypeId, D), proof: Self::Proof) -> Option<IdentificationTuple<T>> {
		// Log-and-discard helper so every rejection reason is visible in the logs.
		fn print_error<E: Debug>(e: E) {
			log::error!(
				target: LOG_TARGET,
				"Rejecting equivocation report because of key ownership proof error: {:?}", e
			);
		}
		let (id, data) = key;
		let (root, count) = if proof.session == <Session<T>>::current_index() {
			let validators = Self::full_id_validators();
			let count = validators.len() as ValidatorCount;
			let trie = ProvingTrie::<T>::generate_for(validators).map_err(print_error).ok()?;
			(trie.root, count)
		} else {
			<HistoricalSessions<T>>::get(&proof.session)?
		};
		if count != proof.validator_count {
			print_error("InvalidCount");
			return None;
		}
		let proof = StorageProof::new_with_duplicate_nodes_check(proof.trie_nodes)
			.map_err(print_error)
			.ok()?;
		// Track accessed nodes so proofs carrying extraneous nodes can be rejected.
		let mut accessed_nodes_tracker = AccessedNodesTracker::<T::Hash>::new(proof.len());
		let trie = ProvingTrie::<T>::from_proof(root, proof);
		let res = trie.query(id, data.as_ref(), Some(&mut accessed_nodes_tracker))?;
		accessed_nodes_tracker.ensure_no_unused_nodes().map_err(print_error).ok()?;
		Some(res)
	}
}
#[cfg(test)]
pub(crate) mod tests {
	use super::*;
	use crate::mock::{
		force_new_session, set_next_validators, NextValidators, Session, System, Test,
	};
	use alloc::vec;
	use sp_runtime::{key_types::DUMMY, testing::UintAuthorityId, BuildStorage};
	use sp_state_machine::BasicExternalities;
	use frame_support::traits::{KeyOwnerProofSystem, OnInitialize};
	type Historical = Pallet<Test>;
	/// Build test externalities with the mock genesis validators and their session keys.
	pub(crate) fn new_test_ext() -> sp_io::TestExternalities {
		let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();
		let keys: Vec<_> = NextValidators::get()
			.iter()
			.cloned()
			.map(|i| (i, i, UintAuthorityId(i).into()))
			.collect();
		BasicExternalities::execute_with_storage(&mut t, || {
			// Accounts need a provider reference before session keys can be assigned.
			for (ref k, ..) in &keys {
				frame_system::Pallet::<Test>::inc_providers(k);
			}
		});
		pallet_session::GenesisConfig::<Test> { keys, ..Default::default() }
			.assimilate_storage(&mut t)
			.unwrap();
		sp_io::TestExternalities::new(t)
	}
	// A proof generated in one session must verify both in that session and after
	// the session has changed (via the stored historical root).
	#[test]
	fn generated_proof_is_good() {
		new_test_ext().execute_with(|| {
			set_next_validators(vec![1, 2]);
			force_new_session();
			System::set_block_number(1);
			Session::on_initialize(1);
			let encoded_key_1 = UintAuthorityId(1).encode();
			let proof = Historical::prove((DUMMY, &encoded_key_1[..])).unwrap();
			// proof-checking in the same session is OK.
			assert!(Historical::check_proof((DUMMY, &encoded_key_1[..]), proof.clone()).is_some());
			set_next_validators(vec![1, 2, 4]);
			force_new_session();
			System::set_block_number(2);
			Session::on_initialize(2);
			assert!(Historical::historical_root(proof.session).is_some());
			assert!(Session::current_index() > proof.session);
			// proof-checking in the next session is also OK.
			assert!(Historical::check_proof((DUMMY, &encoded_key_1[..]), proof.clone()).is_some());
			set_next_validators(vec![1, 2, 5]);
			force_new_session();
			System::set_block_number(3);
			Session::on_initialize(3);
		});
	}
	// `prune_up_to` removes roots, moves `StoredRange`, is a no-op for already-pruned
	// indices, and clamps out-of-range requests.
	#[test]
	fn prune_up_to_works() {
		new_test_ext().execute_with(|| {
			// Advance through sessions so roots for indices 0..100 get stored.
			for i in 1..99u64 {
				set_next_validators(vec![i]);
				force_new_session();
				System::set_block_number(i);
				Session::on_initialize(i);
			}
			assert_eq!(<StoredRange<Test>>::get(), Some((0, 100)));
			for i in 0..100 {
				assert!(Historical::historical_root(i).is_some())
			}
			Historical::prune_up_to(10);
			assert_eq!(<StoredRange<Test>>::get(), Some((10, 100)));
			// Pruning to an already-pruned index is a harmless no-op.
			Historical::prune_up_to(9);
			assert_eq!(<StoredRange<Test>>::get(), Some((10, 100)));
			for i in 10..100 {
				assert!(Historical::historical_root(i).is_some())
			}
			Historical::prune_up_to(99);
			assert_eq!(<StoredRange<Test>>::get(), Some((99, 100)));
			Historical::prune_up_to(100);
			assert_eq!(<StoredRange<Test>>::get(), None);
			for i in 99..199u64 {
				set_next_validators(vec![i]);
				force_new_session();
				System::set_block_number(i);
				Session::on_initialize(i);
			}
			assert_eq!(<StoredRange<Test>>::get(), Some((100, 200)));
			for i in 100..200 {
				assert!(Historical::historical_root(i).is_some())
			}
			// Pruning far past the stored end clears everything (clamped to `end`).
			Historical::prune_up_to(9999);
			assert_eq!(<StoredRange<Test>>::get(), None);
			for i in 100..200 {
				assert!(Historical::historical_root(i).is_none())
			}
		});
	}
}
@@ -0,0 +1,262 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Off-chain logic for creating a proof based data provided by on-chain logic.
//!
//! Validator-set extracting an iterator from an off-chain worker stored list containing historical
//! validator-sets. Based on the logic of historical slashing, but the validation is done off-chain.
//! Use [`fn store_current_session_validator_set_to_offchain()`](super::onchain) to store the
//! required data to the offchain validator set. This is used in conjunction with [`ProvingTrie`]
//! and the off-chain indexing API.
use alloc::vec::Vec;
use sp_runtime::{
offchain::storage::{MutateStorageError, StorageRetrievalError, StorageValueRef},
KeyTypeId,
};
use sp_session::MembershipProof;
use super::{shared, Config, IdentificationTuple, ProvingTrie};
use crate::{Pallet as SessionModule, SessionIndex};
/// A set of validators, which was used for a fixed session index.
struct ValidatorSet<T: Config> {
	// Validators paired with their full identification, as stored off-chain.
	validator_set: Vec<IdentificationTuple<T>>,
}
impl<T: Config> ValidatorSet<T> {
	/// Load the set of validators for a particular session index from the off-chain storage.
	///
	/// If no entry exists for the derived key, or the stored value cannot be decoded,
	/// `None` is returned.
	/// Empty validator sets should only ever exist for genesis blocks.
	pub fn load_from_offchain_db(session_index: SessionIndex) -> Option<Self> {
		// The key mirrors what `onchain::store_session_validator_set_to_offchain` writes.
		let derived_key = shared::derive_key(shared::PREFIX, session_index);
		StorageValueRef::persistent(derived_key.as_ref())
			.get::<Vec<(T::ValidatorId, T::FullIdentification)>>()
			.ok()
			.flatten()
			.map(|validator_set| Self { validator_set })
	}
	/// Number of validators in the set.
	#[inline]
	fn len(&self) -> usize {
		self.validator_set.len()
	}
}
/// Implement conversion into iterator for usage
/// with [ProvingTrie](super::ProvingTrie::generate_for).
impl<T: Config> core::iter::IntoIterator for ValidatorSet<T> {
	type Item = (T::ValidatorId, T::FullIdentification);
	type IntoIter = alloc::vec::IntoIter<Self::Item>;
	fn into_iter(self) -> Self::IntoIter {
		// Consumes the set, yielding owned (validator id, full identification) pairs.
		self.validator_set.into_iter()
	}
}
/// Create a proof based on the data available in the off-chain database.
///
/// Based on the yielded `MembershipProof` the implementer may decide what
/// to do, i.e. in case of a failed proof, enqueue a transaction back on
/// chain reflecting that, with all its consequences such as i.e. slashing.
pub fn prove_session_membership<T: Config, D: AsRef<[u8]>>(
	session_index: SessionIndex,
	session_key: (KeyTypeId, D),
) -> Option<MembershipProof> {
	let (id, data) = session_key;
	// The validator set for the requested session must have been stored off-chain.
	let validators = ValidatorSet::<T>::load_from_offchain_db(session_index)?;
	let count = validators.len() as u32;
	// Rebuild the proving trie and record the nodes needed for the key lookup.
	let trie = ProvingTrie::<T>::generate_for(validators.into_iter()).ok()?;
	let trie_nodes = trie.prove(id, data.as_ref())?;
	Some(MembershipProof { session: session_index, trie_nodes, validator_count: count })
}
/// Attempt to prune anything that is older than `first_to_keep` session index.
///
/// Due to re-organisation it could be that the `first_to_keep` might be less
/// than the stored one, in which case the conservative choice is made to keep records
/// up to the one that is the lesser.
pub fn prune_older_than<T: Config>(first_to_keep: SessionIndex) {
	// `LAST_PRUNE` holds the pruning cursor: the first session index still kept.
	let derived_key = shared::LAST_PRUNE.to_vec();
	let entry = StorageValueRef::persistent(derived_key.as_ref());
	match entry.mutate(
		|current: Result<Option<SessionIndex>, StorageRetrievalError>| -> Result<_, ()> {
			match current {
				Ok(Some(current)) if current < first_to_keep => Ok(first_to_keep),
				// do not move the cursor, if the new one would be behind ours
				Ok(Some(current)) => Ok(current),
				Ok(None) => Ok(first_to_keep),
				// if the storage contains undecodable data, overwrite with current anyways
				// which might leak some entries being never purged, but that is acceptable
				// in this context
				Err(_) => Ok(first_to_keep),
			}
		},
	) {
		Ok(new_value) => {
			// on a re-org this is not necessarily true, with the above they might be equal
			// NOTE(review): the closure above always returns max(current, first_to_keep),
			// so `new_value < first_to_keep` seems unsatisfiable and this clear-loop looks
			// unreachable — verify the intended cursor semantics (should it iterate from
			// the *old* cursor instead?).
			if new_value < first_to_keep {
				for session_index in new_value..first_to_keep {
					let derived_key = shared::derive_key(shared::PREFIX, session_index);
					let _ = StorageValueRef::persistent(derived_key.as_ref()).clear();
				}
			}
		},
		// Another worker moved the cursor concurrently; skip pruning this round.
		Err(MutateStorageError::ConcurrentModification(_)) => {},
		// The closure above never returns `Err`, so this arm is effectively unreachable.
		Err(MutateStorageError::ValueFunctionFailed(_)) => {},
	}
}
/// Keep the newest `n` items, and prune all items older than that.
pub fn keep_newest<T: Config>(n_to_keep: usize) {
	let current_index = <SessionModule<T>>::current_index();
	let keep = n_to_keep as SessionIndex;
	// Only prune when there are strictly more sessions than we want to keep.
	if keep < current_index {
		prune_older_than::<T>(current_index - keep)
	}
}
#[cfg(test)]
mod tests {
	use super::*;
	use crate::{
		historical::{onchain, Pallet},
		mock::{force_new_session, set_next_validators, NextValidators, Session, System, Test},
	};
	use codec::Encode;
	use sp_core::{
		crypto::key_types::DUMMY,
		offchain::{testing::TestOffchainExt, OffchainDbExt, OffchainWorkerExt, StorageKind},
	};
	use sp_runtime::{testing::UintAuthorityId, BuildStorage};
	use sp_state_machine::BasicExternalities;
	use frame_support::traits::{KeyOwnerProofSystem, OnInitialize};
	type Historical = Pallet<Test>;
	/// Build test externalities with an off-chain database and worker extensions registered.
	pub fn new_test_ext() -> sp_io::TestExternalities {
		let mut t = frame_system::GenesisConfig::<Test>::default()
			.build_storage()
			.expect("Failed to create test externalities.");
		let keys: Vec<_> = NextValidators::get()
			.iter()
			.cloned()
			.map(|i| (i, i, UintAuthorityId(i).into()))
			.collect();
		BasicExternalities::execute_with_storage(&mut t, || {
			// Accounts need a provider reference before session keys can be assigned.
			for (ref k, ..) in &keys {
				frame_system::Pallet::<Test>::inc_providers(k);
			}
		});
		crate::GenesisConfig::<Test> { keys, ..Default::default() }
			.assimilate_storage(&mut t)
			.unwrap();
		let mut ext = sp_io::TestExternalities::new(t);
		let (offchain, offchain_state) = TestOffchainExt::with_offchain_db(ext.offchain_db());
		// Seed the test off-chain state deterministically.
		const ITERATIONS: u32 = 5u32;
		let mut seed = [0u8; 32];
		seed[0..4].copy_from_slice(&ITERATIONS.to_le_bytes());
		offchain_state.write().seed = seed;
		ext.register_extension(OffchainDbExt::new(offchain.clone()));
		ext.register_extension(OffchainWorkerExt::new(offchain));
		ext
	}
	// (ValidatorId, FullIdentification) tuples must survive a SCALE encode/decode cycle.
	#[test]
	fn encode_decode_roundtrip() {
		use super::super::{super::Config as SessionConfig, Config as HistoricalConfig};
		use codec::{Decode, Encode};
		let sample = (
			22u32 as <Test as SessionConfig>::ValidatorId,
			7_777_777 as <Test as HistoricalConfig>::FullIdentification,
		);
		let encoded = sample.encode();
		let decoded = Decode::decode(&mut encoded.as_slice()).expect("Must decode");
		assert_eq!(sample, decoded);
	}
	// Values written via off-chain indexing are readable from off-chain local storage
	// after the overlay has been persisted.
	#[test]
	fn onchain_to_offchain() {
		let mut ext = new_test_ext();
		const DATA: &[u8] = &[7, 8, 9, 10, 11];
		ext.execute_with(|| {
			b"alphaomega"[..].using_encoded(|key| sp_io::offchain_index::set(key, DATA));
		});
		// Off-chain indexing writes only become visible after persisting the overlay.
		ext.persist_offchain_overlay();
		ext.execute_with(|| {
			let data = b"alphaomega"[..].using_encoded(|key| {
				sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, key)
			});
			assert_eq!(data, Some(DATA.to_vec()));
		});
	}
	// A validator set stored on-chain can be used off-chain (next block) to generate a
	// membership proof that the pallet then accepts.
	#[test]
	fn historical_proof_offchain() {
		let mut ext = new_test_ext();
		let encoded_key_1 = UintAuthorityId(1).encode();
		ext.execute_with(|| {
			set_next_validators(vec![1, 2]);
			force_new_session();
			System::set_block_number(1);
			Session::on_initialize(1);
			// "on-chain"
			onchain::store_current_session_validator_set_to_offchain::<Test>();
			assert_eq!(<SessionModule<Test>>::current_index(), 1);
			set_next_validators(vec![7, 8]);
			force_new_session();
		});
		ext.persist_offchain_overlay();
		ext.execute_with(|| {
			System::set_block_number(2);
			Session::on_initialize(2);
			assert_eq!(<SessionModule<Test>>::current_index(), 2);
			// "off-chain"
			let proof = prove_session_membership::<Test, _>(1, (DUMMY, &encoded_key_1));
			assert!(proof.is_some());
			let proof = proof.expect("Must be Some(Proof)");
			assert!(Historical::check_proof((DUMMY, &encoded_key_1[..]), proof.clone()).is_some());
		});
	}
}
@@ -0,0 +1,59 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! On-chain logic to store a validator-set for deferred validation using an off-chain worker.
use alloc::vec::Vec;
use codec::Encode;
use sp_runtime::traits::Convert;
use super::{shared, Config as HistoricalConfig};
use crate::{Config as SessionConfig, Pallet as SessionModule, SessionIndex};
/// Store the validator-set associated to the `session_index` to the off-chain database.
///
/// Further processing is then done [`off-chain side`](super::offchain).
///
/// **Must** be called from on-chain, i.e. a call that originates from
/// `on_initialize(..)` or `on_finalization(..)`.
/// **Must** be called during the session, which validator-set is to be stored for further
/// off-chain processing. Otherwise the `FullIdentification` might not be available.
pub fn store_session_validator_set_to_offchain<T: HistoricalConfig + SessionConfig>(
	session_index: SessionIndex,
) {
	// Pair every active validator with its full identification, skipping any validator
	// for which no identification can be derived.
	let mut validator_list = Vec::new();
	for validator_id in <SessionModule<T>>::validators() {
		let full_identification =
			<<T as HistoricalConfig>::FullIdentificationOf>::convert(validator_id.clone());
		if let Some(full_identification) = full_identification {
			validator_list.push((validator_id, full_identification));
		}
	}
	// Write the SCALE-encoded set under a session-derived key via the off-chain
	// indexing API, where the off-chain worker side can pick it up later.
	let derived_key = shared::derive_key(shared::PREFIX, session_index);
	validator_list.using_encoded(|encoded_validator_list| {
		sp_io::offchain_index::set(derived_key.as_slice(), encoded_validator_list)
	});
}
/// Store the validator set associated to the _current_ session index to the off-chain database.
///
/// See [`store_session_validator_set_to_offchain`]
/// for further information and restrictions.
pub fn store_current_session_validator_set_to_offchain<T: HistoricalConfig + SessionConfig>() {
	// Resolve the current session index on-chain, then delegate.
	let session_index = <SessionModule<T>>::current_index();
	store_session_validator_set_to_offchain::<T>(session_index);
}
@@ -0,0 +1,36 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Shared logic between on-chain and off-chain components used for slashing using an off-chain
//! worker.
use alloc::{borrow::ToOwned, vec::Vec};
use codec::Encode;
use sp_staking::SessionIndex;
/// Storage-key prefix under which each session's validator set is indexed.
pub(super) const PREFIX: &[u8] = b"session_historical";
/// Off-chain local-storage key tracking the last session index that was pruned.
pub(super) const LAST_PRUNE: &[u8] = b"session_historical_last_prune";
/// Derive the key used to store the list of validators
///
/// Key layout: `<prefix>/<SCALE-encoded session index>`.
pub(super) fn derive_key<P: AsRef<[u8]>>(prefix: P, session_index: SessionIndex) -> Vec<u8> {
	let mut key = prefix.as_ref().to_owned();
	key.push(b'/');
	key.extend_from_slice(&session_index.encode());
	key
}
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,196 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use core::str;
use sp_io::hashing::twox_128;
use frame_support::{
storage::{generator::StorageValue, StoragePrefixedMap},
traits::{
Get, GetStorageVersion, PalletInfoAccess, StorageVersion,
STORAGE_VERSION_STORAGE_KEY_POSTFIX,
},
weights::Weight,
};
use crate::historical as pallet_session_historical;
/// Log target for all migration messages emitted by this module.
const LOG_TARGET: &str = "runtime::session_historical";
/// Storage module prefix used by this pallet prior to v1 (shared with `Session`).
const OLD_PREFIX: &str = "Session";
/// Migrate the entire storage of this pallet to a new prefix.
///
/// This new prefix must be the same as the one set in construct_runtime.
///
/// The migration will look into the storage version in order not to trigger a migration on an up
/// to date storage. Thus the on chain storage version must be less than 1 in order to trigger the
/// migration.
pub fn migrate<T: pallet_session_historical::Config, P: GetStorageVersion + PalletInfoAccess>(
) -> Weight {
	let new_pallet_name = <P as PalletInfoAccess>::name();

	// Old and new prefixes are identical: nothing to move.
	if new_pallet_name == OLD_PREFIX {
		log::info!(
			target: LOG_TARGET,
			"New pallet name is equal to the old prefix. No migration needs to be done.",
		);
		return Weight::zero();
	}

	let on_chain_storage_version = <P as GetStorageVersion>::on_chain_storage_version();
	log::info!(
		target: LOG_TARGET,
		"Running migration to v1 for session_historical with storage version {:?}",
		on_chain_storage_version,
	);

	// Already at (or past) v1: the migration ran before, do nothing.
	if on_chain_storage_version >= 1 {
		log::warn!(
			target: LOG_TARGET,
			"Attempted to apply migration to v1 but failed because storage version is {:?}",
			on_chain_storage_version,
		);
		return Weight::zero();
	}

	// Move both storage items of this pallet from the old prefix to the new one.
	let storage_prefixes = [
		pallet_session_historical::HistoricalSessions::<T>::storage_prefix(),
		pallet_session_historical::StoredRange::<T>::storage_prefix(),
	];
	for storage_prefix in storage_prefixes {
		frame_support::storage::migration::move_storage_from_pallet(
			storage_prefix,
			OLD_PREFIX.as_bytes(),
			new_pallet_name.as_bytes(),
		);
		log_migration("migration", storage_prefix, OLD_PREFIX, new_pallet_name);
	}

	StorageVersion::new(1).put::<P>();
	// Charge a full block: the amount of data moved is unbounded.
	<T as frame_system::Config>::BlockWeights::get().max_block
}
/// Some checks prior to migration. This can be linked to
/// `frame_support::traits::OnRuntimeUpgrade::pre_upgrade` for further testing.
///
/// Panics if anything goes wrong.
pub fn pre_migrate<
	T: pallet_session_historical::Config,
	P: GetStorageVersion + PalletInfoAccess,
>() {
	let new_pallet_name = <P as PalletInfoAccess>::name();

	let storage_prefix_historical_sessions =
		pallet_session_historical::HistoricalSessions::<T>::storage_prefix();
	let storage_prefix_stored_range = pallet_session_historical::StoredRange::<T>::storage_prefix();
	log_migration("pre-migration", storage_prefix_historical_sessions, OLD_PREFIX, new_pallet_name);
	log_migration("pre-migration", storage_prefix_stored_range, OLD_PREFIX, new_pallet_name);

	// Same prefix: `migrate` will be a no-op, nothing to check.
	if new_pallet_name == OLD_PREFIX {
		return;
	}

	let new_pallet_prefix = twox_128(new_pallet_name.as_bytes());
	let storage_version_key = twox_128(STORAGE_VERSION_STORAGE_KEY_POSTFIX);

	let mut new_pallet_prefix_iter = frame_support::storage::KeyPrefixIterator::new(
		new_pallet_prefix.to_vec(),
		new_pallet_prefix.to_vec(),
		|key| Ok(key.to_vec()),
	);

	// Ensure nothing except the storage_version_key is stored in the new prefix.
	assert!(new_pallet_prefix_iter.all(|key| key == storage_version_key));

	// The migration must still be pending.
	assert!(<P as GetStorageVersion>::on_chain_storage_version() < 1);
}
/// Some checks for after migration. This can be linked to
/// `frame_support::traits::OnRuntimeUpgrade::post_upgrade` for further testing.
///
/// Panics if anything goes wrong.
pub fn post_migrate<
	T: pallet_session_historical::Config,
	P: GetStorageVersion + PalletInfoAccess,
>() {
	let new_pallet_name = <P as PalletInfoAccess>::name();

	let storage_prefix_historical_sessions =
		pallet_session_historical::HistoricalSessions::<T>::storage_prefix();
	let storage_prefix_stored_range = pallet_session_historical::StoredRange::<T>::storage_prefix();
	log_migration(
		"post-migration",
		storage_prefix_historical_sessions,
		OLD_PREFIX,
		new_pallet_name,
	);
	log_migration("post-migration", storage_prefix_stored_range, OLD_PREFIX, new_pallet_name);

	// Same prefix: the migration was a no-op, nothing to check.
	if new_pallet_name == OLD_PREFIX {
		return;
	}

	// Assert that neither `HistoricalSessions` nor `StoredRange` entries remain
	// under the old pallet prefix.
	let old_pallet_prefix = twox_128(OLD_PREFIX.as_bytes());
	for storage_prefix in [storage_prefix_historical_sessions, storage_prefix_stored_range] {
		let old_storage_key = [&old_pallet_prefix, &twox_128(storage_prefix)[..]].concat();
		let remaining = frame_support::storage::KeyPrefixIterator::new(
			old_storage_key.clone(),
			old_storage_key,
			|_| Ok(()),
		)
		.count();
		assert_eq!(remaining, 0);
	}

	// Assert that the `HistoricalSessions` and `StoredRange` storages (if they exist) have been
	// moved to the new prefix.
	// NOTE: storage_version_key is already in the new prefix.
	let new_pallet_prefix = twox_128(new_pallet_name.as_bytes());
	let new_pallet_prefix_iter = frame_support::storage::KeyPrefixIterator::new(
		new_pallet_prefix.to_vec(),
		new_pallet_prefix.to_vec(),
		|_| Ok(()),
	);
	assert!(new_pallet_prefix_iter.count() >= 1);

	assert_eq!(<P as GetStorageVersion>::on_chain_storage_version(), 1);
}
/// Emit a uniform log line describing one (pre/post) migration step of a storage item.
fn log_migration(stage: &str, storage_prefix: &[u8], old_pallet_name: &str, new_pallet_name: &str) {
	// Storage prefixes are expected to be ASCII; fall back to a placeholder otherwise.
	let storage_name = str::from_utf8(storage_prefix).unwrap_or("<Invalid UTF8>");
	log::info!(
		target: LOG_TARGET,
		"{} prefix of storage '{}': '{}' ==> '{}'",
		stage,
		storage_name,
		old_pallet_name,
		new_pallet_name,
	);
}
@@ -0,0 +1,25 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// Version 1.
///
/// In version 0 session historical pallet uses `Session` for storage module prefix.
/// In version 1 it uses its name as configured in `construct_runtime`.
/// This migration moves session historical pallet storages from old prefix to new prefix.
#[cfg(feature = "historical")]
pub mod historical;
pub mod v1;
@@ -0,0 +1,103 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{Config, DisabledValidators as NewDisabledValidators, Pallet, Vec};
use frame_support::{
pallet_prelude::{Get, ValueQuery, Weight},
traits::UncheckedOnRuntimeUpgrade,
};
use sp_staking::offence::OffenceSeverity;
#[cfg(feature = "try-runtime")]
use sp_runtime::TryRuntimeError;
#[cfg(feature = "try-runtime")]
use frame_support::ensure;
use frame_support::migrations::VersionedMigration;
/// This is the storage getting migrated.
///
/// Alias for the pre-v1 `DisabledValidators` item, which stored only validator
/// indices without an associated `OffenceSeverity`.
#[frame_support::storage_alias]
type DisabledValidators<T: Config> = StorageValue<Pallet<T>, Vec<u32>, ValueQuery>;
/// Access to the pre-migration disabled-validator storage of the consumer pallet.
///
/// Implementors bridge the old `Vec<u32>` representation to the new
/// `(index, OffenceSeverity)` representation used after the migration.
pub trait MigrateDisabledValidators {
	/// Peek the list of disabled validators and their offence severity.
	///
	/// Must not modify storage; used by try-runtime checks only.
	#[cfg(feature = "try-runtime")]
	fn peek_disabled() -> Vec<(u32, OffenceSeverity)>;

	/// Return the list of disabled validators and their offence severity, removing them from the
	/// underlying storage.
	fn take_disabled() -> Vec<(u32, OffenceSeverity)>;
}
/// Default [`MigrateDisabledValidators`] implementation: carries over the stored
/// indices and assigns every disabled validator the maximum offence severity.
pub struct InitOffenceSeverity<T>(core::marker::PhantomData<T>);

impl<T: Config> MigrateDisabledValidators for InitOffenceSeverity<T> {
	#[cfg(feature = "try-runtime")]
	fn peek_disabled() -> Vec<(u32, OffenceSeverity)> {
		// Read-only view of the old storage, severity defaulted to the maximum.
		DisabledValidators::<T>::get()
			.into_iter()
			.map(|index| (index, OffenceSeverity::max_severity()))
			.collect()
	}

	fn take_disabled() -> Vec<(u32, OffenceSeverity)> {
		// Drain the old storage, severity defaulted to the maximum.
		DisabledValidators::<T>::take()
			.into_iter()
			.map(|index| (index, OffenceSeverity::max_severity()))
			.collect()
	}
}
/// Unversioned v0→v1 migration: moves the disabled validators from the old
/// `Vec<u32>` storage (via `S`) into the new `Vec<(u32, OffenceSeverity)>` storage.
pub struct VersionUncheckedMigrateV0ToV1<T, S: MigrateDisabledValidators>(
	core::marker::PhantomData<(T, S)>,
);
impl<T: Config, S: MigrateDisabledValidators> UncheckedOnRuntimeUpgrade
	for VersionUncheckedMigrateV0ToV1<T, S>
{
	fn on_runtime_upgrade() -> Weight {
		// Drain the old storage and write the severity-annotated list.
		let disabled = S::take_disabled();
		NewDisabledValidators::<T>::put(disabled);

		T::DbWeight::get().reads_writes(1, 1)
	}

	#[cfg(feature = "try-runtime")]
	fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
		// The source implementation must agree with the raw old storage.
		let source_disabled = S::peek_disabled().iter().map(|(v, _s)| *v).collect::<Vec<_>>();
		let existing_disabled = DisabledValidators::<T>::get();
		ensure!(source_disabled == existing_disabled, "Disabled validators mismatch");
		Ok(Vec::new())
	}

	#[cfg(feature = "try-runtime")]
	fn post_upgrade(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
		// Compare against the validator count instead of `len() - 1`: with an
		// empty validator set the old `len() as u32 - 1` underflowed (panicking
		// in debug builds, wrapping to `u32::MAX` — a vacuous check — in release).
		let validators_count = crate::Validators::<T>::get().len() as u32;
		for (v, _s) in NewDisabledValidators::<T>::get() {
			ensure!(v < validators_count, "Disabled validator index out of bounds");
		}
		Ok(())
	}
}
/// Versioned wrapper around [`VersionUncheckedMigrateV0ToV1`]: only runs when the
/// on-chain storage version is 0 and bumps it to 1 afterwards.
pub type MigrateV0ToV1<T, S> = VersionedMigration<
	0,
	1,
	VersionUncheckedMigrateV0ToV1<T, S>,
	Pallet<T>,
	<T as frame_system::Config>::DbWeight,
>;
+319
View File
@@ -0,0 +1,319 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Mock helpers for Session.
use super::*;
use crate as pallet_session;
#[cfg(feature = "historical")]
use crate::historical as pallet_session_historical;
use frame_support::{derive_impl, parameter_types, traits::ConstU64};
use pallet_balances::{self, AccountData};
use sp_core::crypto::key_types::DUMMY;
use sp_runtime::{
impl_opaque_keys,
testing::UintAuthorityId,
traits::{Convert, OpaqueKeys},
BuildStorage,
};
use sp_staking::SessionIndex;
use std::collections::BTreeMap;
impl_opaque_keys! {
	// Single-key session key set used by the mock runtime; `dummy` is keyed by
	// `UintAuthorityId::ID` (the `DUMMY` key type).
	pub struct MockSessionKeys {
		pub dummy: UintAuthorityId,
	}
}

// Convenience conversion so tests can write `UintAuthorityId(n).into()`.
impl From<UintAuthorityId> for MockSessionKeys {
	fn from(dummy: UintAuthorityId) -> Self {
		Self { dummy }
	}
}
/// Key type ids used by the pre-upgrade (historical) mock key set.
pub const KEY_ID_A: KeyTypeId = KeyTypeId([4; 4]);
pub const KEY_ID_B: KeyTypeId = KeyTypeId([9; 4]);

/// A mock session-key layout as it looked *before* a runtime upgrade: two raw
/// key blobs of differing length.
#[derive(Debug, Clone, codec::Encode, codec::Decode, PartialEq, Eq)]
pub struct PreUpgradeMockSessionKeys {
	pub a: [u8; 32],
	pub b: [u8; 64],
}

impl OpaqueKeys for PreUpgradeMockSessionKeys {
	type KeyTypeIdProviders = ();

	fn key_ids() -> &'static [KeyTypeId] {
		&[KEY_ID_A, KEY_ID_B]
	}

	fn get_raw(&self, i: KeyTypeId) -> &[u8] {
		// Unknown key types yield an empty slice rather than panicking.
		if i == KEY_ID_A {
			&self.a[..]
		} else if i == KEY_ID_B {
			&self.b[..]
		} else {
			&[]
		}
	}
}
type Block = frame_system::mocking::MockBlock<Test>;

// Mock runtime; the `Historical` pallet is only wired in when the `historical`
// feature is enabled.
#[cfg(feature = "historical")]
frame_support::construct_runtime!(
	pub enum Test
	{
		System: frame_system,
		Session: pallet_session,
		Balances: pallet_balances,
		Historical: pallet_session_historical,
	}
);

#[cfg(not(feature = "historical"))]
frame_support::construct_runtime!(
	pub enum Test
	{
		System: frame_system,
		Session: pallet_session,
		Balances: pallet_balances,
	}
);
parameter_types! {
	// Active validator set, as seen by the mock session manager.
	pub static Validators: Vec<u64> = vec![1, 2, 3];
	// Validator set to activate at the next rotation.
	pub static NextValidators: Vec<u64> = vec![1, 2, 3];
	// Authority keys mirrored from `on_new_session`.
	pub static Authorities: Vec<UintAuthorityId> =
		vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)];
	// One-shot flag: end the current session at the next block.
	pub static ForceSessionEnd: bool = false;
	// Periodic session length in blocks.
	pub static SessionLength: u64 = 2;
	// Whether the last `on_new_session` reported `changed == true`.
	pub static SessionChanged: bool = false;
	// When `true`, the manager only rotates the set after a disabling event.
	pub static TestSessionChanged: bool = false;
	// Set by `on_disabled`.
	pub static Disabled: bool = false;
	// Stores if `on_before_session_end` was called
	pub static BeforeSessionEndCalled: bool = false;
	// Account -> validator-id mapping backing `TestValidatorIdOf`.
	pub static ValidatorAccounts: BTreeMap<u64, u64> = BTreeMap::new();
	// Deposit charged (held) when an account registers session keys.
	pub static KeyDeposit: u64 = 10;
}
/// Session-end predicate for the mock: ends every `SessionLength` blocks, or
/// once when `ForceSessionEnd` has been raised (consuming the flag).
pub struct TestShouldEndSession;
impl ShouldEndSession<u64> for TestShouldEndSession {
	fn should_end_session(now: u64) -> bool {
		if now % SessionLength::get() == 0 {
			// Periodic rotation; the force flag is deliberately left untouched
			// (same short-circuit as `a || b`).
			return true;
		}
		// One-shot override: read the flag and reset it.
		ForceSessionEnd::mutate(|flag| std::mem::replace(flag, false))
	}
}
/// Records session-handler callbacks into test statics so tests can observe them.
pub struct TestSessionHandler;
impl SessionHandler<u64> for TestSessionHandler {
	const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[UintAuthorityId::ID];

	fn on_genesis_session<T: OpaqueKeys>(_validators: &[(u64, T)]) {}

	fn on_new_session<T: OpaqueKeys>(
		changed: bool,
		validators: &[(u64, T)],
		_queued_validators: &[(u64, T)],
	) {
		// Remember whether the validator set changed and mirror the new
		// authorities (their DUMMY keys) into the `Authorities` static.
		SessionChanged::mutate(|flag| *flag = changed);
		let new_authorities: Vec<UintAuthorityId> = validators
			.iter()
			.map(|(_, keys)| keys.get::<UintAuthorityId>(DUMMY).unwrap_or_default())
			.collect();
		Authorities::mutate(|auths| *auths = new_authorities);
	}

	fn on_disabled(_validator_index: u32) {
		Disabled::mutate(|flag| *flag = true)
	}

	fn on_before_session_ending() {
		BeforeSessionEndCalled::mutate(|called| *called = true);
	}
}
/// Session manager driving the mock validator rotation.
pub struct TestSessionManager;
impl SessionManager<u64> for TestSessionManager {
	fn end_session(_: SessionIndex) {}
	fn start_session(_: SessionIndex) {}
	fn new_session(_: SessionIndex) -> Option<Vec<u64>> {
		if !TestSessionChanged::get() {
			// Promote `NextValidators` to the active set. Note: `mutate` returns
			// the closure's result, so this expression yields `Some(new_set)`.
			Validators::mutate(|v| {
				*v = NextValidators::get().clone();
				Some(v.clone())
			})
		} else if Disabled::mutate(|l| std::mem::replace(&mut *l, false)) {
			// If there was a disabled validator, underlying conditions have changed
			// so we return `Some`.
			Some(Validators::get().clone())
		} else {
			None
		}
	}
}

#[cfg(feature = "historical")]
impl crate::historical::SessionManager<u64, u64> for TestSessionManager {
	fn end_session(_: SessionIndex) {}
	fn start_session(_: SessionIndex) {}
	fn new_session(new_index: SessionIndex) -> Option<Vec<(u64, u64)>> {
		// Reuse the plain implementation; each validator id doubles as its own
		// "full identification".
		<Self as SessionManager<_>>::new_session(new_index)
			.map(|vals| vals.into_iter().map(|val| (val, val)).collect())
	}
}
/// Current authority set as recorded by `TestSessionHandler::on_new_session`.
pub fn authorities() -> Vec<UintAuthorityId> {
	// `Authorities::get()` already returns an owned `Vec`; the previous
	// `.to_vec()` performed a second, redundant allocation.
	Authorities::get()
}
/// Request that the current session is ended at the next block.
pub fn force_new_session() {
	ForceSessionEnd::mutate(|flag| *flag = true)
}

/// Override the periodic session length used by `TestShouldEndSession`.
pub fn set_session_length(x: u64) {
	SessionLength::mutate(|length| *length = x)
}

/// Whether the last `on_new_session` reported a changed validator set.
pub fn session_changed() -> bool {
	SessionChanged::get()
}

/// Set the validator set that the manager returns for the next session.
pub fn set_next_validators(next: Vec<u64>) {
	NextValidators::mutate(|validators| *validators = next);
}

/// Whether `on_before_session_ending` has fired since the last reset.
pub fn before_session_end_called() -> bool {
	BeforeSessionEndCalled::get()
}

/// Clear the `on_before_session_ending` marker.
pub fn reset_before_session_end_called() {
	BeforeSessionEndCalled::mutate(|called| *called = false);
}
parameter_types! {
	// Index into the event list up to which events have already been returned
	// by `session_events_since_last_call`.
	pub static LastSessionEventIndex: usize = 0;
}

/// Return all `pallet_session` events emitted since the previous call.
pub fn session_events_since_last_call() -> Vec<pallet_session::Event<Test>> {
	let events = System::read_events_for_pallet::<pallet_session::Event<Test>>();
	let already_seen = LastSessionEventIndex::get();
	// Advance the cursor so the next call only sees newer events.
	LastSessionEventIndex::set(events.len());
	events.into_iter().skip(already_seen).collect()
}
/// Balance currently held from `who` under the session `Keys` hold reason.
pub fn session_hold(who: u64) -> u64 {
	let reason = crate::HoldReason::Keys.into();
	<Balances as frame_support::traits::fungible::InspectHold<_>>::balance_on_hold(&reason, &who)
}
/// Build test externalities with funded accounts and genesis session keys for
/// the `NextValidators`.
pub fn new_test_ext() -> sp_io::TestExternalities {
	let mut t = frame_system::GenesisConfig::<Test>::default().build_storage().unwrap();

	let ed = <Test as pallet_balances::Config>::ExistentialDeposit::get();
	// Enough to pay the key deposit many times over (but never below the ED).
	let funded = |who: u64| (who, (KeyDeposit::get() * 10).max(ed));
	let mut balances = vec![funded(1), funded(2), funded(3), funded(4), funded(69)];
	// one account who does not have enough balance to pay the key deposit
	balances.push((999, (KeyDeposit::get().saturating_sub(1)).max(ed)));
	balances.push(funded(1000));
	pallet_balances::GenesisConfig::<Test> { balances, dev_accounts: None }
		.assimilate_storage(&mut t)
		.unwrap();

	// Give every initial validator a session key equal to its account id.
	let keys: Vec<_> = NextValidators::get()
		.iter()
		.cloned()
		.map(|i| (i, i, UintAuthorityId(i).into()))
		.collect();
	pallet_session::GenesisConfig::<Test> { keys, ..Default::default() }
		.assimilate_storage(&mut t)
		.unwrap();

	// Identity mapping account -> validator id for `TestValidatorIdOf`.
	let v = NextValidators::get().iter().map(|&i| (i, i)).collect();
	ValidatorAccounts::mutate(|m| *m = v);

	sp_io::TestExternalities::new(t)
}
// Minimal system config for the mock runtime; everything not listed comes from
// the test default config.
#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
impl frame_system::Config for Test {
	type Block = Block;
	type AccountData = AccountData<u64>;
	type RuntimeEvent = RuntimeEvent;
}

impl pallet_timestamp::Config for Test {
	type Moment = u64;
	type OnTimestampSet = ();
	// Minimum 5 time units between consecutive timestamps.
	type MinimumPeriod = ConstU64<5>;
	type WeightInfo = ();
}
/// Account → validator-id conversion backed by the `ValidatorAccounts` static.
pub struct TestValidatorIdOf;
impl TestValidatorIdOf {
	/// Replace the whole account → validator mapping.
	pub fn set(v: BTreeMap<u64, u64>) {
		ValidatorAccounts::mutate(|accounts| *accounts = v);
	}
}
impl Convert<u64, Option<u64>> for TestValidatorIdOf {
	fn convert(x: u64) -> Option<u64> {
		// Unknown accounts map to `None`.
		ValidatorAccounts::get().get(&x).copied()
	}
}
// Disabling threshold for `UpToLimitDisablingStrategy` and
// `UpToLimitWithReEnablingDisablingStrategy`.
pub(crate) const DISABLING_LIMIT_FACTOR: usize = 3;

impl Config for Test {
	type ShouldEndSession = TestShouldEndSession;
	// With the `historical` feature the manager is wrapped so each session's
	// validator set is also noted for the historical trie.
	#[cfg(feature = "historical")]
	type SessionManager = crate::historical::NoteHistoricalRoot<Test, TestSessionManager>;
	#[cfg(not(feature = "historical"))]
	type SessionManager = TestSessionManager;
	type SessionHandler = TestSessionHandler;
	type ValidatorId = u64;
	type ValidatorIdOf = TestValidatorIdOf;
	type Keys = MockSessionKeys;
	type RuntimeEvent = RuntimeEvent;
	type NextSessionRotation = ();
	type DisablingStrategy =
		disabling::UpToLimitWithReEnablingDisablingStrategy<DISABLING_LIMIT_FACTOR>;
	type WeightInfo = ();
	type Currency = pallet_balances::Pallet<Test>;
	// Deposit held when registering session keys (see the `KeyDeposit` static).
	type KeyDeposit = KeyDeposit;
}

#[cfg(feature = "historical")]
impl crate::historical::Config for Test {
	type RuntimeEvent = RuntimeEvent;
	// Validators are identified by their bare account id in tests.
	type FullIdentification = u64;
	type FullIdentificationOf = sp_runtime::traits::ConvertInto;
}

#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)]
impl pallet_balances::Config for Test {
	type AccountStore = System;
	type RuntimeEvent = RuntimeEvent;
}
+842
View File
@@ -0,0 +1,842 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Tests for the Session Pallet
use super::*;
use crate::mock::{
authorities, before_session_end_called, force_new_session, new_test_ext,
reset_before_session_end_called, session_changed, session_events_since_last_call, session_hold,
set_next_validators, set_session_length, Balances, KeyDeposit, MockSessionKeys,
PreUpgradeMockSessionKeys, RuntimeOrigin, Session, SessionChanged, System, Test,
TestSessionChanged, TestValidatorIdOf, ValidatorAccounts,
};
use codec::Decode;
use sp_core::crypto::key_types::DUMMY;
use sp_runtime::{testing::UintAuthorityId, Perbill};
use frame_support::{
assert_err, assert_noop, assert_ok,
traits::{ConstU64, OnInitialize},
};
/// Advance to `block`: clear the change marker, bump the block number and run
/// the session pallet's `on_initialize` hook.
fn initialize_block(block: u64) {
	// Reset before `on_initialize` so the flag reflects this block only.
	SessionChanged::mutate(|changed| *changed = false);
	System::set_block_number(block);
	Session::on_initialize(block);
}
#[test]
fn simple_setup_should_work() {
	new_test_ext().execute_with(|| {
		// Genesis wires the initial validators straight through as authorities.
		assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]);
		assert_eq!(Validators::<Test>::get(), vec![1, 2, 3]);
	});
}

#[test]
fn put_get_keys() {
	new_test_ext().execute_with(|| {
		// Keys stored for an account can be read back unchanged.
		Session::put_keys(&10, &UintAuthorityId(10).into());
		assert_eq!(Session::load_keys(&10), Some(UintAuthorityId(10).into()));
	})
}
#[test]
fn keys_cleared_on_kill() {
	let mut ext = new_test_ext();
	ext.execute_with(|| {
		assert_eq!(Validators::<Test>::get(), vec![1, 2, 3]);
		assert_eq!(Session::load_keys(&1), Some(UintAuthorityId(1).into()));

		let id = DUMMY;
		assert_eq!(Session::key_owner(id, UintAuthorityId(1).get_raw(id)), Some(1));

		// Purging removes the provider reference, the stored keys and the
		// key-owner reverse mapping for the account.
		assert!(System::is_provider_required(&1));
		assert_ok!(Session::purge_keys(RuntimeOrigin::signed(1)));
		assert!(!System::is_provider_required(&1));

		assert_eq!(Session::load_keys(&1), None);
		assert_eq!(Session::key_owner(id, UintAuthorityId(1).get_raw(id)), None);
	})
}

#[test]
fn purge_keys_works_for_stash_id() {
	let mut ext = new_test_ext();
	ext.execute_with(|| {
		assert_eq!(Validators::<Test>::get(), vec![1, 2, 3]);
		// Map controller accounts 10/20 to validators 1/2.
		TestValidatorIdOf::set(vec![(10, 1), (20, 2), (3, 3)].into_iter().collect());
		assert_eq!(Session::load_keys(&1), Some(UintAuthorityId(1).into()));
		assert_eq!(Session::load_keys(&2), Some(UintAuthorityId(2).into()));

		let id = DUMMY;
		assert_eq!(Session::key_owner(id, UintAuthorityId(1).get_raw(id)), Some(1));

		// Purging works both via the mapped account (10 -> validator 1) and
		// directly via the validator account (2).
		assert_ok!(Session::purge_keys(RuntimeOrigin::signed(10)));
		assert_ok!(Session::purge_keys(RuntimeOrigin::signed(2)));

		assert_eq!(Session::load_keys(&10), None);
		assert_eq!(Session::load_keys(&20), None);
		assert_eq!(Session::key_owner(id, UintAuthorityId(10).get_raw(id)), None);
		assert_eq!(Session::key_owner(id, UintAuthorityId(20).get_raw(id)), None);
	})
}
#[test]
fn authorities_should_track_validators() {
	reset_before_session_end_called();

	new_test_ext().execute_with(|| {
		TestValidatorIdOf::set(vec![(1, 1), (2, 2), (3, 3), (4, 4)].into_iter().collect());

		set_next_validators(vec![1, 2]);
		force_new_session();
		initialize_block(1);
		// The new set is queued first; the active set lags one session behind.
		assert_eq!(
			QueuedKeys::<Test>::get(),
			vec![(1, UintAuthorityId(1).into()), (2, UintAuthorityId(2).into()),]
		);
		assert_eq!(Validators::<Test>::get(), vec![1, 2, 3]);
		assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]);
		assert!(before_session_end_called());
		reset_before_session_end_called();

		force_new_session();
		initialize_block(2);
		// One rotation later the queued set has become active.
		assert_eq!(
			QueuedKeys::<Test>::get(),
			vec![(1, UintAuthorityId(1).into()), (2, UintAuthorityId(2).into()),]
		);
		assert_eq!(Validators::<Test>::get(), vec![1, 2]);
		assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2)]);
		assert!(before_session_end_called());
		reset_before_session_end_called();

		set_next_validators(vec![1, 2, 4]);
		// Validator 4 registers keys so it can be queued with the next set.
		assert_ok!(Session::set_keys(RuntimeOrigin::signed(4), UintAuthorityId(4).into(), vec![]));
		force_new_session();
		initialize_block(3);
		assert_eq!(
			QueuedKeys::<Test>::get(),
			vec![
				(1, UintAuthorityId(1).into()),
				(2, UintAuthorityId(2).into()),
				(4, UintAuthorityId(4).into()),
			]
		);
		assert_eq!(Validators::<Test>::get(), vec![1, 2]);
		assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2)]);
		assert!(before_session_end_called());

		force_new_session();
		initialize_block(4);
		assert_eq!(
			QueuedKeys::<Test>::get(),
			vec![
				(1, UintAuthorityId(1).into()),
				(2, UintAuthorityId(2).into()),
				(4, UintAuthorityId(4).into()),
			]
		);
		// Now validator 4 is active and its key is an authority.
		assert_eq!(Validators::<Test>::get(), vec![1, 2, 4]);
		assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(4)]);
	});
}
#[test]
fn should_work_with_early_exit() {
	new_test_ext().execute_with(|| {
		set_session_length(10);

		// No rotation before the period elapses...
		initialize_block(1);
		assert_eq!(CurrentIndex::<Test>::get(), 0);

		initialize_block(2);
		assert_eq!(CurrentIndex::<Test>::get(), 0);

		// ...unless one is forced explicitly.
		force_new_session();
		initialize_block(3);
		assert_eq!(CurrentIndex::<Test>::get(), 1);

		initialize_block(9);
		assert_eq!(CurrentIndex::<Test>::get(), 1);

		// The periodic rotation at a multiple of the session length still fires.
		initialize_block(10);
		assert_eq!(CurrentIndex::<Test>::get(), 2);
	});
}
// End-to-end check of authorities and event emission across session rollovers,
// with and without key changes (session length is 2 blocks by default).
#[test]
fn session_change_should_work() {
	new_test_ext().execute_with(|| {
		// Block 1: No change
		initialize_block(1);
		assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]);
		assert_eq!(session_events_since_last_call(), vec![]);

		// Block 2: Session rollover, but no change.
		initialize_block(2);
		assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]);
		assert_eq!(
			session_events_since_last_call(),
			vec![Event::NewQueued, Event::NewSession { session_index: 1 }]
		);

		// Block 3: Set new key for validator 2; no visible change.
		initialize_block(3);
		assert_ok!(Session::set_keys(RuntimeOrigin::signed(2), UintAuthorityId(5).into(), vec![]));
		assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]);
		assert_eq!(session_events_since_last_call(), vec![]);

		// Block 4: Session rollover; no visible change (the new key is only queued).
		initialize_block(4);
		assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]);
		assert_eq!(
			session_events_since_last_call(),
			vec![Event::NewQueued, Event::NewSession { session_index: 2 }]
		);

		// Block 5: No change.
		initialize_block(5);
		assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]);
		assert_eq!(session_events_since_last_call(), vec![]);

		// Block 6: Session rollover; authority 2 changes.
		initialize_block(6);
		assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(5), UintAuthorityId(3)]);
		assert_eq!(
			session_events_since_last_call(),
			vec![Event::NewQueued, Event::NewSession { session_index: 3 }]
		);
	});
}
#[test]
fn duplicates_are_not_allowed() {
	new_test_ext().execute_with(|| {
		TestValidatorIdOf::set(vec![(1, 1), (2, 2), (3, 3), (4, 4)].into_iter().collect());

		System::set_block_number(1);
		Session::on_initialize(1);
		// Key `1` is already owned by account 1, so account 4 may not claim it.
		assert_noop!(
			Session::set_keys(RuntimeOrigin::signed(4), UintAuthorityId(1).into(), vec![]),
			Error::<Test>::DuplicatedKey,
		);
		assert_ok!(Session::set_keys(RuntimeOrigin::signed(1), UintAuthorityId(10).into(), vec![]));

		// is fine now that 1 has migrated off.
		assert_ok!(Session::set_keys(RuntimeOrigin::signed(4), UintAuthorityId(1).into(), vec![]));
	});
}
#[test]
fn session_changed_flag_works() {
	reset_before_session_end_called();

	new_test_ext().execute_with(|| {
		TestValidatorIdOf::set(vec![(1, 1), (2, 2), (3, 3), (69, 69)].into_iter().collect());

		// Make the mock manager only return a new set after a disabling event.
		TestSessionChanged::mutate(|l| *l = true);

		force_new_session();
		initialize_block(1);
		assert!(!session_changed());
		assert!(before_session_end_called());
		reset_before_session_end_called();

		force_new_session();
		initialize_block(2);
		assert!(!session_changed());
		assert!(before_session_end_called());
		reset_before_session_end_called();

		// Disabling a validator makes the manager report a new set...
		Session::disable_index(0);
		force_new_session();
		initialize_block(3);
		assert!(!session_changed());
		assert!(before_session_end_called());
		reset_before_session_end_called();

		// ...which is observed as a change one rotation later.
		force_new_session();
		initialize_block(4);
		assert!(session_changed());
		assert!(before_session_end_called());
		reset_before_session_end_called();

		force_new_session();
		initialize_block(5);
		assert!(!session_changed());
		assert!(before_session_end_called());
		reset_before_session_end_called();

		// Queuing a new key for validator 2 is not yet visible as a change.
		assert_ok!(Session::set_keys(RuntimeOrigin::signed(2), UintAuthorityId(5).into(), vec![]));
		force_new_session();
		initialize_block(6);
		assert!(!session_changed());
		assert!(before_session_end_called());
		reset_before_session_end_called();

		// changing the keys of a validator leads to change.
		assert_ok!(Session::set_keys(
			RuntimeOrigin::signed(69),
			UintAuthorityId(69).into(),
			vec![]
		));
		force_new_session();
		initialize_block(7);
		assert!(session_changed());
		assert!(before_session_end_called());
		reset_before_session_end_called();

		// while a further rotation with no key changes does not.
		// NOTE(review): the original comment said "changing the keys of a
		// non-validator", but no `set_keys` call happens here — confirm intent.
		force_new_session();
		initialize_block(7);
		assert!(!session_changed());
		assert!(before_session_end_called());
		reset_before_session_end_called();
	});
}
#[test]
fn periodic_session_works() {
	// Period = 10 blocks, offset = 3 blocks.
	type P = PeriodicSessions<ConstU64<10>, ConstU64<3>>;

	// make sure that offset phase behaves correctly
	for i in 0u64..3 {
		assert!(!P::should_end_session(i));
		assert_eq!(P::estimate_next_session_rotation(i).0.unwrap(), 3);

		// the last block of the session (i.e. the one before session rotation)
		// should have progress 100%.
		if P::estimate_next_session_rotation(i).0.unwrap() - 1 == i {
			assert_eq!(
				P::estimate_current_session_progress(i).0.unwrap(),
				Permill::from_percent(100)
			);
		} else {
			assert!(
				P::estimate_current_session_progress(i).0.unwrap() < Permill::from_percent(100)
			);
		}
	}

	// we end the session at block #3 and we consider this block the first one
	// from the next session. since we're past the offset phase it represents
	// 1/10 of progress.
	assert!(P::should_end_session(3u64));
	assert_eq!(P::estimate_next_session_rotation(3u64).0.unwrap(), 3);
	assert_eq!(P::estimate_current_session_progress(3u64).0.unwrap(), Permill::from_percent(10));

	for i in (1u64..10).map(|i| 3 + i) {
		assert!(!P::should_end_session(i));
		assert_eq!(P::estimate_next_session_rotation(i).0.unwrap(), 13);

		// as with the offset phase the last block of the session must have 100%
		// progress.
		if P::estimate_next_session_rotation(i).0.unwrap() - 1 == i {
			assert_eq!(
				P::estimate_current_session_progress(i).0.unwrap(),
				Permill::from_percent(100)
			);
		} else {
			assert!(
				P::estimate_current_session_progress(i).0.unwrap() < Permill::from_percent(100)
			);
		}
	}

	// the new session starts and we proceed in 1/10 increments.
	assert!(P::should_end_session(13u64));
	assert_eq!(P::estimate_next_session_rotation(13u64).0.unwrap(), 23);
	assert_eq!(P::estimate_current_session_progress(13u64).0.unwrap(), Permill::from_percent(10));

	assert!(!P::should_end_session(14u64));
	assert_eq!(P::estimate_next_session_rotation(14u64).0.unwrap(), 23);
	assert_eq!(P::estimate_current_session_progress(14u64).0.unwrap(), Permill::from_percent(20));
}
#[test]
fn session_keys_generate_output_works_as_set_keys_input() {
    new_test_ext().execute_with(|| {
        // Generate a fresh opaque key blob, decode it into the concrete
        // `Keys` type, and feed it straight back through `set_keys`.
        let generated = mock::MockSessionKeys::generate(None);
        let decoded =
            <mock::Test as Config>::Keys::decode(&mut &generated[..]).expect("Decode keys");
        assert_ok!(Session::set_keys(RuntimeOrigin::signed(2), decoded, vec![]));
    });
}
#[test]
fn upgrade_keys() {
use frame_support::storage;
use sp_core::crypto::key_types::DUMMY;
// `upgrade_keys` must rewrite `QueuedKeys`, every `NextKeys` entry and the
// `KeyOwner` reverse-map from the old key format to the new one in place.
// This test assumes certain mocks.
assert_eq!(mock::NextValidators::get().clone(), vec![1, 2, 3]);
assert_eq!(mock::Validators::get().clone(), vec![1, 2, 3]);
new_test_ext().execute_with(|| {
// Old-format keys for the three genesis validators.
let pre_one = PreUpgradeMockSessionKeys { a: [1u8; 32], b: [1u8; 64] };
let pre_two = PreUpgradeMockSessionKeys { a: [2u8; 32], b: [2u8; 64] };
let pre_three = PreUpgradeMockSessionKeys { a: [3u8; 32], b: [3u8; 64] };
let val_keys = vec![(1u64, pre_one), (2u64, pre_two), (3u64, pre_three)];
// Set `QueuedKeys`.
// Overwrite the raw storage value with the old-format encoding to
// simulate pre-upgrade state (the typed API could not store it).
{
let storage_key = super::QueuedKeys::<Test>::hashed_key();
assert!(storage::unhashed::exists(&storage_key));
storage::unhashed::put(&storage_key, &val_keys);
}
// Set `NextKeys`.
// Same trick per validator: write old-format keys over the hashed entry.
{
for &(i, ref keys) in val_keys.iter() {
let storage_key = super::NextKeys::<Test>::hashed_key_for(i);
assert!(storage::unhashed::exists(&storage_key));
storage::unhashed::put(&storage_key, keys);
}
}
// Set `KeyOwner`.
{
for &(i, ref keys) in val_keys.iter() {
// clear key owner for `UintAuthorityId` keys set in genesis.
let presumed = UintAuthorityId(i);
let raw_prev = presumed.as_ref();
assert_eq!(Session::key_owner(DUMMY, raw_prev), Some(i));
Session::clear_key_owner(DUMMY, raw_prev);
// Register ownership under the old key ids instead.
Session::put_key_owner(mock::KEY_ID_A, keys.get_raw(mock::KEY_ID_A), &i);
Session::put_key_owner(mock::KEY_ID_B, keys.get_raw(mock::KEY_ID_B), &i);
}
}
// Do the upgrade and check sanity.
let mock_keys_for = |val| mock::MockSessionKeys { dummy: UintAuthorityId(val) };
Session::upgrade_keys::<PreUpgradeMockSessionKeys, _>(|val, _old_keys| mock_keys_for(val));
// Check key ownership.
// Old-format ownership entries must be gone, replaced by new-format ones.
for (i, ref keys) in val_keys.iter() {
assert!(Session::key_owner(mock::KEY_ID_A, keys.get_raw(mock::KEY_ID_A)).is_none());
assert!(Session::key_owner(mock::KEY_ID_B, keys.get_raw(mock::KEY_ID_B)).is_none());
let migrated_key = UintAuthorityId(*i);
assert_eq!(Session::key_owner(DUMMY, migrated_key.as_ref()), Some(*i));
}
// Check queued keys.
assert_eq!(
QueuedKeys::<Test>::get(),
vec![(1, mock_keys_for(1)), (2, mock_keys_for(2)), (3, mock_keys_for(3)),],
);
// Check per-validator next keys were converted too.
for i in 1u64..4 {
assert_eq!(super::NextKeys::<Test>::get(&i), Some(mock_keys_for(i)));
}
})
}
#[cfg(feature = "historical")]
#[test]
fn test_migration_v1() {
use crate::{
historical::{HistoricalSessions, StoredRange},
mock::Historical,
};
use frame_support::traits::{PalletInfoAccess, StorageVersion};
new_test_ext().execute_with(|| {
// Genesis has already populated historical session data under the
// current (`Historical`) pallet prefix.
assert!(HistoricalSessions::<Test>::iter_values().count() > 0);
assert!(StoredRange::<Test>::exists());
let old_pallet = "Session";
let new_pallet = <Historical as PalletInfoAccess>::name();
// Simulate the pre-migration layout: `move_pallet(from, to)` — note the
// argument order — moves the storage from the `Historical` prefix back
// under the legacy `Session` prefix, and the storage version is reset.
frame_support::storage::migration::move_pallet(
new_pallet.as_bytes(),
old_pallet.as_bytes(),
);
StorageVersion::new(0).put::<Historical>();
// Run the v1 migration together with its pre/post sanity hooks.
crate::migrations::historical::pre_migrate::<Test, Historical>();
crate::migrations::historical::migrate::<Test, Historical>();
crate::migrations::historical::post_migrate::<Test, Historical>();
});
}
#[test]
fn set_keys_should_fail_with_insufficient_funds() {
    new_test_ext().execute_with(|| {
        // Account 999 is mocked to hold `KeyDeposit - 1`, i.e. just under the
        // required key deposit.
        let who = 999;
        let keys = MockSessionKeys { dummy: UintAuthorityId(who).into() };

        // Give the account a provider reference and register it as its own
        // validator id, so only the deposit check can make the call fail.
        frame_system::Pallet::<Test>::inc_providers(&who);
        ValidatorAccounts::mutate(|map| {
            map.insert(who, who);
        });

        // Holding the deposit must fail with `Token(FundsUnavailable)` coming
        // from `pallet-balances`.
        assert_err!(
            Session::set_keys(RuntimeOrigin::signed(who), keys, vec![]),
            sp_runtime::TokenError::FundsUnavailable
        );
    });
}
#[test]
fn set_keys_should_hold_funds() {
    new_test_ext().execute_with(|| {
        // Account 1000 is mocked with enough free balance for the deposit.
        let who = 1000;
        let keys = MockSessionKeys { dummy: UintAuthorityId(who).into() };
        let expected_deposit = KeyDeposit::get();

        // Register the account as its own validator id.
        ValidatorAccounts::mutate(|map| {
            map.insert(who, who);
        });

        // Setting keys succeeds and places exactly the deposit on hold.
        assert_ok!(Session::set_keys(RuntimeOrigin::signed(who), keys, vec![]));
        assert_eq!(session_hold(who), expected_deposit);
    });
}
#[test]
fn purge_keys_should_unhold_funds() {
    new_test_ext().execute_with(|| {
        // Account 1000 is mocked with enough free balance for the deposit.
        let who = 1000;
        let keys = MockSessionKeys { dummy: UintAuthorityId(who).into() };
        let deposit = KeyDeposit::get();

        // Register a validator id and a provider reference for the account.
        ValidatorAccounts::mutate(|map| {
            map.insert(who, who);
        });
        frame_system::Pallet::<Test>::inc_providers(&who);

        // Setting keys reserves (at least) the deposit.
        assert_ok!(Session::set_keys(RuntimeOrigin::signed(who), keys, vec![]));
        let reserved_before = Balances::reserved_balance(&who);
        assert!(reserved_before >= deposit, "Deposit should be reserved after setting keys");

        // Purging the keys must release exactly the deposit again.
        assert_ok!(Session::purge_keys(RuntimeOrigin::signed(who)));
        let reserved_after = Balances::reserved_balance(&who);
        assert_eq!(reserved_after, reserved_before - deposit);
    });
}
#[test]
fn existing_validators_without_hold_are_except() {
    // NOTE(review): "except" in the name is presumably a typo for "exempt".
    //
    // When a key deposit is introduced, a runtime may already contain
    // validators with no held amount. Those validators must remain free to
    // both update and purge their session keys without a hold being taken.
    //
    // Build genesis with the deposit disabled so validator 1 starts without a hold.
    KeyDeposit::set(0);
    new_test_ext().execute_with(|| {
        // Restore the regular deposit for everything after genesis.
        KeyDeposit::set(10);

        // Validator 1 is part of the initial set and has nothing on hold.
        assert_eq!(session_hold(1), 0);

        // Updating keys does not retroactively take a deposit...
        assert_ok!(Session::set_keys(
            RuntimeOrigin::signed(1),
            UintAuthorityId(7).into(),
            Default::default()
        ));
        assert_eq!(session_hold(1), 0);

        // ...and purging them leaves the hold untouched as well.
        assert_ok!(Session::purge_keys(RuntimeOrigin::signed(1)));
        assert_eq!(session_hold(1), 0);
    });
}
mod disabling_byzantine_threshold {
use super::*;
use crate::disabling::{DisablingStrategy, UpToLimitDisablingStrategy};
use sp_staking::offence::OffenceSeverity;
// Common test data - the stash of the offending validator, the era of the offence and the
// active set
const OFFENDER_ID: <Test as frame_system::Config>::AccountId = 7;
const MAX_OFFENDER_SEVERITY: OffenceSeverity = OffenceSeverity(Perbill::from_percent(100));
const MIN_OFFENDER_SEVERITY: OffenceSeverity = OffenceSeverity(Perbill::from_percent(0));
const ACTIVE_SET: [<Test as Config>::ValidatorId; 7] = [1, 2, 3, 4, 5, 6, 7];
const OFFENDER_VALIDATOR_IDX: u32 = 6;
#[test]
fn disable_when_below_byzantine_threshold() {
sp_io::TestExternalities::default().execute_with(|| {
let initially_disabled = vec![(1, MAX_OFFENDER_SEVERITY)];
Validators::<Test>::put(ACTIVE_SET.to_vec());
let disabling_decision =
<UpToLimitDisablingStrategy as DisablingStrategy<Test>>::decision(
&OFFENDER_ID,
MAX_OFFENDER_SEVERITY,
&initially_disabled,
);
assert_eq!(disabling_decision.disable, Some(OFFENDER_VALIDATOR_IDX));
});
}
#[test]
fn disable_when_below_custom_byzantine_threshold() {
sp_io::TestExternalities::default().execute_with(|| {
let initially_disabled = vec![(1, MAX_OFFENDER_SEVERITY), (2, MAX_OFFENDER_SEVERITY)];
Validators::<Test>::put(ACTIVE_SET.to_vec());
let disabling_decision =
<UpToLimitDisablingStrategy<2> as DisablingStrategy<Test>>::decision(
&OFFENDER_ID,
MAX_OFFENDER_SEVERITY,
&initially_disabled,
);
assert_eq!(disabling_decision.disable, Some(OFFENDER_VALIDATOR_IDX));
});
}
#[test]
fn non_slashable_offences_still_disable() {
sp_io::TestExternalities::default().execute_with(|| {
let initially_disabled = vec![(1, MAX_OFFENDER_SEVERITY)];
Validators::<Test>::put(ACTIVE_SET.to_vec());
let disabling_decision =
<UpToLimitDisablingStrategy as DisablingStrategy<Test>>::decision(
&OFFENDER_ID,
OffenceSeverity(Perbill::from_percent(0)),
&initially_disabled,
);
assert_eq!(disabling_decision.disable, Some(OFFENDER_VALIDATOR_IDX));
});
}
#[test]
fn dont_disable_beyond_byzantine_threshold() {
sp_io::TestExternalities::default().execute_with(|| {
let initially_disabled = vec![(1, MIN_OFFENDER_SEVERITY), (2, MAX_OFFENDER_SEVERITY)];
Validators::<Test>::put(ACTIVE_SET.to_vec());
let disabling_decision =
<UpToLimitDisablingStrategy as DisablingStrategy<Test>>::decision(
&OFFENDER_ID,
MAX_OFFENDER_SEVERITY,
&initially_disabled,
);
assert!(disabling_decision.disable.is_none() && disabling_decision.reenable.is_none());
});
}
}
mod disabling_with_reenabling {
use super::*;
use crate::disabling::{DisablingStrategy, UpToLimitWithReEnablingDisablingStrategy};
use sp_staking::offence::OffenceSeverity;
// Common test data - the stash of the offending validator, the era of the offence and the
// active set
const OFFENDER_ID: <Test as frame_system::Config>::AccountId = 7;
const MAX_OFFENDER_SEVERITY: OffenceSeverity = OffenceSeverity(Perbill::from_percent(100));
const LOW_OFFENDER_SEVERITY: OffenceSeverity = OffenceSeverity(Perbill::from_percent(0));
const ACTIVE_SET: [<Test as Config>::ValidatorId; 7] = [1, 2, 3, 4, 5, 6, 7];
const OFFENDER_VALIDATOR_IDX: u32 = 6; // the offender is with index 6 in the active set
#[test]
fn disable_when_below_byzantine_threshold() {
sp_io::TestExternalities::default().execute_with(|| {
let initially_disabled = vec![(0, MAX_OFFENDER_SEVERITY)];
Validators::<Test>::put(ACTIVE_SET.to_vec());
let disabling_decision =
<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
&OFFENDER_ID,
MAX_OFFENDER_SEVERITY,
&initially_disabled,
);
// Disable Offender and do not re-enable anyone
assert_eq!(disabling_decision.disable, Some(OFFENDER_VALIDATOR_IDX));
assert_eq!(disabling_decision.reenable, None);
});
}
#[test]
fn reenable_arbitrary_on_equal_severity() {
sp_io::TestExternalities::default().execute_with(|| {
let initially_disabled = vec![(0, MAX_OFFENDER_SEVERITY), (1, MAX_OFFENDER_SEVERITY)];
Validators::<Test>::put(ACTIVE_SET.to_vec());
let disabling_decision =
<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
&OFFENDER_ID,
MAX_OFFENDER_SEVERITY,
&initially_disabled,
);
assert!(disabling_decision.disable.is_some() && disabling_decision.reenable.is_some());
// Disable 7 and enable 1
assert_eq!(disabling_decision.disable.unwrap(), OFFENDER_VALIDATOR_IDX);
assert_eq!(disabling_decision.reenable.unwrap(), 0);
});
}
#[test]
fn do_not_reenable_higher_offenders() {
sp_io::TestExternalities::default().execute_with(|| {
let initially_disabled = vec![(0, MAX_OFFENDER_SEVERITY), (1, MAX_OFFENDER_SEVERITY)];
Validators::<Test>::put(ACTIVE_SET.to_vec());
let disabling_decision =
<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
&OFFENDER_ID,
LOW_OFFENDER_SEVERITY,
&initially_disabled,
);
assert!(disabling_decision.disable.is_none() && disabling_decision.reenable.is_none());
assert_ok!(Session::do_try_state());
});
}
#[test]
fn reenable_lower_offenders() {
sp_io::TestExternalities::default().execute_with(|| {
let initially_disabled = vec![(0, LOW_OFFENDER_SEVERITY), (1, LOW_OFFENDER_SEVERITY)];
Validators::<Test>::put(ACTIVE_SET.to_vec());
let disabling_decision =
<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
&OFFENDER_ID,
MAX_OFFENDER_SEVERITY,
&initially_disabled,
);
assert!(disabling_decision.disable.is_some() && disabling_decision.reenable.is_some());
// Disable 7 and enable 1
assert_eq!(disabling_decision.disable.unwrap(), OFFENDER_VALIDATOR_IDX);
assert_eq!(disabling_decision.reenable.unwrap(), 0);
assert_ok!(Session::do_try_state());
});
}
#[test]
fn reenable_lower_offenders_unordered() {
sp_io::TestExternalities::default().execute_with(|| {
let initially_disabled = vec![(0, MAX_OFFENDER_SEVERITY), (1, LOW_OFFENDER_SEVERITY)];
Validators::<Test>::put(ACTIVE_SET.to_vec());
let disabling_decision =
<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
&OFFENDER_ID,
MAX_OFFENDER_SEVERITY,
&initially_disabled,
);
assert!(disabling_decision.disable.is_some() && disabling_decision.reenable.is_some());
// Disable 7 and enable 1
assert_eq!(disabling_decision.disable.unwrap(), OFFENDER_VALIDATOR_IDX);
assert_eq!(disabling_decision.reenable.unwrap(), 1);
});
}
#[test]
fn update_severity() {
sp_io::TestExternalities::default().execute_with(|| {
let initially_disabled =
vec![(OFFENDER_VALIDATOR_IDX, LOW_OFFENDER_SEVERITY), (0, MAX_OFFENDER_SEVERITY)];
Validators::<Test>::put(ACTIVE_SET.to_vec());
let disabling_decision =
<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
&OFFENDER_ID,
MAX_OFFENDER_SEVERITY,
&initially_disabled,
);
assert!(disabling_decision.disable.is_some() && disabling_decision.reenable.is_none());
// Disable 7 "again" AKA update their severity
assert_eq!(disabling_decision.disable.unwrap(), OFFENDER_VALIDATOR_IDX);
});
}
#[test]
fn update_cannot_lower_severity() {
sp_io::TestExternalities::default().execute_with(|| {
let initially_disabled =
vec![(OFFENDER_VALIDATOR_IDX, MAX_OFFENDER_SEVERITY), (0, MAX_OFFENDER_SEVERITY)];
Validators::<Test>::put(ACTIVE_SET.to_vec());
let disabling_decision =
<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
&OFFENDER_ID,
LOW_OFFENDER_SEVERITY,
&initially_disabled,
);
assert!(disabling_decision.disable.is_none() && disabling_decision.reenable.is_none());
});
}
#[test]
fn no_accidental_reenablement_on_repeated_offence() {
sp_io::TestExternalities::default().execute_with(|| {
let initially_disabled =
vec![(OFFENDER_VALIDATOR_IDX, MAX_OFFENDER_SEVERITY), (0, LOW_OFFENDER_SEVERITY)];
Validators::<Test>::put(ACTIVE_SET.to_vec());
let disabling_decision =
<UpToLimitWithReEnablingDisablingStrategy as DisablingStrategy<Test>>::decision(
&OFFENDER_ID,
MAX_OFFENDER_SEVERITY,
&initially_disabled,
);
assert!(disabling_decision.disable.is_none() && disabling_decision.reenable.is_none());
});
}
}
+146
View File
@@ -0,0 +1,146 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Autogenerated weights for `pallet_session`
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
//! DATE: 2025-02-21, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
//! HOSTNAME: `4563561839a5`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
//! WASM-EXECUTION: `Compiled`, CHAIN: `None`, DB CACHE: `1024`
// Executed Command:
// frame-omni-bencher
// v1
// benchmark
// pallet
// --extrinsic=*
// --runtime=target/production/wbuild/kitchensink-runtime/kitchensink_runtime.wasm
// --pallet=pallet_session
// --header=/__w/pezkuwi-sdk/pezkuwi-sdk/substrate/HEADER-APACHE2
// --output=/__w/pezkuwi-sdk/pezkuwi-sdk/substrate/frame/session/src/weights.rs
// --wasm-execution=compiled
// --steps=50
// --repeat=20
// --heap-pages=4096
// --template=substrate/.maintain/frame-weight-template.hbs
// --no-storage-info
// --no-min-squares
// --no-median-slopes
// --genesis-builder-policy=none
// --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic,pallet_nomination_pools,pallet_remark,pallet_transaction_storage,pallet_election_provider_multi_block,pallet_election_provider_multi_block::signed,pallet_election_provider_multi_block::unsigned,pallet_election_provider_multi_block::verifier
#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
#![allow(unused_imports)]
#![allow(missing_docs)]
#![allow(dead_code)]
use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
use core::marker::PhantomData;
/// Weight functions needed for `pallet_session`.
pub trait WeightInfo {
/// Weight of the `set_keys` extrinsic.
fn set_keys() -> Weight;
/// Weight of the `purge_keys` extrinsic.
fn purge_keys() -> Weight;
}
/// Weights for `pallet_session` using the Substrate node and recommended hardware.
// NOTE: this impl is autogenerated by the benchmark CLI (see the file
// header) — regenerate the benchmarks instead of editing numbers by hand.
pub struct SubstrateWeight<T>(PhantomData<T>);
impl<T: frame_system::Config> WeightInfo for SubstrateWeight<T> {
/// Storage: `Staking::Ledger` (r:1 w:0)
/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
/// Storage: `Session::NextKeys` (r:1 w:1)
/// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`)
/// Storage: `Session::KeyOwner` (r:6 w:6)
/// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`)
fn set_keys() -> Weight {
// Proof Size summary in bytes:
// Measured: `1056`
// Estimated: `16896`
// Minimum execution time: 49_530_000 picoseconds.
Weight::from_parts(51_496_000, 16896)
// 8 reads / 7 writes match the per-storage totals listed above.
.saturating_add(T::DbWeight::get().reads(8_u64))
.saturating_add(T::DbWeight::get().writes(7_u64))
}
/// Storage: `Staking::Ledger` (r:1 w:0)
/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
/// Storage: `Session::NextKeys` (r:1 w:1)
/// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`)
/// Storage: `Session::KeyOwner` (r:0 w:6)
/// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`)
fn purge_keys() -> Weight {
// Proof Size summary in bytes:
// Measured: `1333`
// Estimated: `4798`
// Minimum execution time: 39_443_000 picoseconds.
Weight::from_parts(40_486_000, 4798)
// 2 reads / 7 writes match the per-storage totals listed above.
.saturating_add(T::DbWeight::get().reads(2_u64))
.saturating_add(T::DbWeight::get().writes(7_u64))
}
}
// For backwards compatibility and tests.
// Mirrors `SubstrateWeight` but uses the hard-coded `RocksDbWeight`
// constants, so it works without a concrete runtime configuration.
impl WeightInfo for () {
/// Storage: `Staking::Ledger` (r:1 w:0)
/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
/// Storage: `Session::NextKeys` (r:1 w:1)
/// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`)
/// Storage: `Session::KeyOwner` (r:6 w:6)
/// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`)
fn set_keys() -> Weight {
// Proof Size summary in bytes:
// Measured: `1056`
// Estimated: `16896`
// Minimum execution time: 49_530_000 picoseconds.
Weight::from_parts(51_496_000, 16896)
.saturating_add(RocksDbWeight::get().reads(8_u64))
.saturating_add(RocksDbWeight::get().writes(7_u64))
}
/// Storage: `Staking::Ledger` (r:1 w:0)
/// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`)
/// Storage: `Session::NextKeys` (r:1 w:1)
/// Proof: `Session::NextKeys` (`max_values`: None, `max_size`: None, mode: `Measured`)
/// Storage: `Session::KeyOwner` (r:0 w:6)
/// Proof: `Session::KeyOwner` (`max_values`: None, `max_size`: None, mode: `Measured`)
fn purge_keys() -> Weight {
// Proof Size summary in bytes:
// Measured: `1333`
// Estimated: `4798`
// Minimum execution time: 39_443_000 picoseconds.
Weight::from_parts(40_486_000, 4798)
.saturating_add(RocksDbWeight::get().reads(2_u64))
.saturating_add(RocksDbWeight::get().writes(7_u64))
}
}