contracts: Multi block migrations (#14045)

* Frame Add translate_next

This works similarly to `translate` but only translates a single entry.
This function will be useful in the context of multi-block migration.

* Move to lazy migration

* Updates

* simplify MockMigration

* wip

* wip

* add bench

* add bench

* fmt

* fix bench

* add .

* ".git/.scripts/commands/bench/bench.sh" pallet dev pallet_contracts

* Apply suggestions from code review

Co-authored-by: Alexander Theißen <alex.theissen@me.com>

* Scaffold v10 / v11 fix tests

* PR comment

* tweak pub use

* wip

* wip

* wip

* misc merge master

* misc merge master

* wip

* rm tmp stuff

* wip

* wip

* wip

* wip

* wip

* fixes

* add state

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* fix

* fixed compilation

* clean up logs

* wip

* Revert "Frame Add translate_next"

This reverts commit 10318fc95c42b1f7f25efeb35e6d947ea02bed88.

* Fix v10 logic

* Apply suggestions from code review

Co-authored-by: Alexander Theißen <alex.theissen@me.com>

* wip

* fixes

* exercise del_queue

* bump sample size

* fmt

* wip

* blank line

* fix lint

* fix rustdoc job lint

* PR comment do not use dangerous into()

* Add macros for updating mod visibility

* Add doc

* Add max_weight to integrity_test

* fix compilation

* Add no migration tests

* ".git/.scripts/commands/bench/bench.sh" pallet dev pallet_contracts

* fix clippy

* PR review

* Update frame/contracts/src/lib.rs

Co-authored-by: Sasha Gryaznov <hi@agryaznov.com>

* Fix master merge

* fix merge 2

* fix tryruntime

* fix lint

---------

Co-authored-by: Alexander Theißen <alex.theissen@me.com>
Co-authored-by: command-bot <>
Co-authored-by: Sasha Gryaznov <hi@agryaznov.com>
This commit is contained in:
PG Herveou
2023-05-31 16:19:31 +02:00
committed by GitHub
parent b43a1b0b55
commit c7c5fc709c
10 changed files with 2841 additions and 1482 deletions
+537 -410
View File
@@ -15,462 +15,589 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{BalanceOf, CodeHash, Config, Pallet, TrieId, Weight};
use codec::{Decode, Encode};
//! Migration framework for pallets.
/// Macro to include all migration modules.
/// We only want to make these modules public when `runtime-benchmarks` is
/// enabled, so we can access migration code in benchmarks.
macro_rules! use_modules {
($($module:ident),*) => {
$(
#[cfg(feature = "runtime-benchmarks")]
pub mod $module;
#[cfg(not(feature = "runtime-benchmarks"))]
mod $module;
)*
};
}
use_modules!(v9, v10, v11);
use crate::{weights::WeightInfo, Config, Error, MigrationInProgress, Pallet, Weight, LOG_TARGET};
use codec::{Codec, Decode};
use frame_support::{
codec,
pallet_prelude::*,
storage::migration,
storage_alias,
traits::{Get, OnRuntimeUpgrade},
Identity, Twox64Concat,
traits::{ConstU32, OnRuntimeUpgrade},
};
use sp_runtime::traits::Saturating;
use sp_std::{marker::PhantomData, prelude::*};
use sp_std::marker::PhantomData;
#[cfg(feature = "try-runtime")]
use sp_std::prelude::*;
#[cfg(feature = "try-runtime")]
use sp_runtime::TryRuntimeError;
// Expect-message used when encoding a migration's state into the `Cursor`; soundness is
// guaranteed by the `max_encoded_len` check performed in `Migrate::integrity_test`.
const PROOF_ENCODE: &str = "Tuple::max_encoded_len() < Cursor::max_encoded_len()` is verified in `Self::integrity_test()`; qed";
// Expect-message used when decoding a `Cursor` back into a migration's state.
const PROOF_DECODE: &str =
"We encode to the same type in this trait only. No other code touches this item; qed";
// Diverging helper: called when a storage version has no matching migration in the sequence.
fn invalid_version(version: StorageVersion) -> ! {
panic!("Required migration {version:?} not supported by this runtime. This is a bug.");
}
/// The cursor used to store the state of the current migration step.
pub type Cursor = BoundedVec<u8, ConstU32<1024>>;
// In benchmark and tests we use noop migrations, to test and bench the migration framework itself.
#[cfg(not(any(feature = "runtime-benchmarks", test)))]
type Migrations<T> = (v9::Migration<T>, v10::Migration<T>, v11::Migration<T>);
/// IsFinished describes whether a migration is finished or not.
pub enum IsFinished {
/// The migration completed during this step; no further steps are needed.
Yes,
/// More work remains; `step()` must be called again.
No,
}
/// A trait that allows to migrate storage from one version to another.
///
/// The migration is done in steps. The migration is finished when
/// `step()` returns `IsFinished::Yes`.
///
/// `Codec + MaxEncodedLen` are required so the migration's in-progress state can be
/// serialized into (and restored from) the bounded `Cursor`; `Default` provides the
/// initial state for the first step.
pub trait Migrate: Codec + MaxEncodedLen + Default {
/// Returns the version of the migration.
const VERSION: u16;
/// Returns the maximum weight that can be consumed in a single step.
fn max_step_weight() -> Weight;
/// Process one step of the migration.
///
/// Returns whether the migration is finished and the weight consumed.
fn step(&mut self) -> (IsFinished, Weight);
/// Verify that the migration step fits into `Cursor`, and that `max_step_weight` is not greater
/// than `max_block_weight`.
///
/// Panics (rather than returning an error) because this runs in `integrity_test`,
/// i.e. at build/test time, where a misconfiguration is a programming bug.
fn integrity_test(max_block_weight: Weight) {
if Self::max_step_weight().any_gt(max_block_weight) {
panic!(
"Invalid max_step_weight for Migration {}. Value should be lower than {}",
Self::VERSION,
max_block_weight
);
}
// Ensure the encoded migration state always fits into the bounded `Cursor`.
let len = <Self as MaxEncodedLen>::max_encoded_len();
let max = Cursor::bound();
if len > max {
panic!(
"Migration {} has size {} which is bigger than the maximum of {}",
Self::VERSION,
len,
max,
);
}
}
/// Execute some pre-checks prior to running the first step of this migration.
#[cfg(feature = "try-runtime")]
fn pre_upgrade_step() -> Result<Vec<u8>, TryRuntimeError> {
Ok(Vec::new())
}
/// Execute some post-checks after running the last step of this migration.
#[cfg(feature = "try-runtime")]
fn post_upgrade_step(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
Ok(())
}
}
/// A noop migration that can be used when there is no migration to be done for a given version.
#[doc(hidden)]
#[derive(frame_support::DefaultNoBound, Encode, Decode, MaxEncodedLen)]
pub struct NoopMigration<const N: u16>;
impl<const N: u16> Migrate for NoopMigration<N> {
const VERSION: u16 = N;
// A noop consumes no weight.
fn max_step_weight() -> Weight {
Weight::zero()
}
// Finishes immediately in a single (free) step.
fn step(&mut self) -> (IsFinished, Weight) {
log::debug!(target: LOG_TARGET, "Noop migration for version {}", N);
(IsFinished::Yes, Weight::zero())
}
}
// Sealed-trait pattern: `MigrateSequence` can only be implemented for the tuples
// covered by the blanket impl below, never by downstream code.
mod private {
use crate::migration::Migrate;
pub trait Sealed {}
#[impl_trait_for_tuples::impl_for_tuples(10)]
#[tuple_types_custom_trait_bound(Migrate)]
impl Sealed for Tuple {}
}
/// Defines a sequence of migrations.
///
/// The sequence must be defined by a tuple of migrations, each of which must implement the
/// `Migrate` trait. Migrations must be ordered by their versions with no gaps.
pub trait MigrateSequence: private::Sealed {
/// Returns the range of versions that this migration can handle.
/// Migrations must be ordered by their versions with no gaps.
///
/// The following code will fail to compile:
/// ```compile_fail
/// # use pallet_contracts::{NoopMigration, MigrateSequence};
/// let _ = <(NoopMigration<1>, NoopMigration<3>)>::VERSION_RANGE;
/// ```
/// The following code will compile:
/// ```
/// # use pallet_contracts::{NoopMigration, MigrateSequence};
/// let _ = <(NoopMigration<1>, NoopMigration<2>)>::VERSION_RANGE;
/// ```
const VERSION_RANGE: (u16, u16);
/// Returns the default cursor for the given version.
fn new(version: StorageVersion) -> Cursor;
/// Execute the pre-checks of the migration step associated with this version.
#[cfg(feature = "try-runtime")]
fn pre_upgrade_step(_version: StorageVersion) -> Result<Vec<u8>, TryRuntimeError> {
Ok(Vec::new())
}
/// Execute the post-checks of the migration step associated with this version.
#[cfg(feature = "try-runtime")]
fn post_upgrade_step(_version: StorageVersion, _state: Vec<u8>) -> Result<(), TryRuntimeError> {
Ok(())
}
/// Execute the migration step until the weight limit is reached.
fn steps(version: StorageVersion, cursor: &[u8], weight_left: &mut Weight) -> StepResult;
/// Verify that the migration step fits into `Cursor`, and that `max_step_weight` is not greater
/// than `max_block_weight`.
fn integrity_test(max_block_weight: Weight);
/// Returns whether migrating from `in_storage` to `target` is supported.
///
/// A migration is supported if (in_storage + 1, target) is contained by `VERSION_RANGE`.
fn is_upgrade_supported(in_storage: StorageVersion, target: StorageVersion) -> bool {
// Already at the target version: nothing to migrate, trivially supported.
if in_storage == target {
return true
}
// Downgrades are never supported.
if in_storage > target {
return false
}
let (low, high) = Self::VERSION_RANGE;
// `low - 1` is the oldest on-chain version the first migration can start from;
// `checked_sub` only fails for `low == 0`.
let Some(first_supported) = low.checked_sub(1) else {
return false
};
in_storage >= first_supported && target == high
}
}
/// Performs all necessary migrations based on `StorageVersion`.
pub struct Migration<T: Config>(PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for Migration<T> {
fn on_runtime_upgrade() -> Weight {
let version = <Pallet<T>>::on_chain_storage_version();
#[cfg(not(any(feature = "runtime-benchmarks", test)))]
pub struct Migration<T: Config, M: MigrateSequence = Migrations<T>>(PhantomData<(T, M)>);
/// Custom migration for running runtime-benchmarks and tests.
#[cfg(any(feature = "runtime-benchmarks", test))]
pub struct Migration<T: Config, M: MigrateSequence = (NoopMigration<1>, NoopMigration<2>)>(
PhantomData<(T, M)>,
);
#[cfg(feature = "try-runtime")]
impl<T: Config, M: MigrateSequence> Migration<T, M> {
fn run_all_steps() -> Result<(), TryRuntimeError> {
let mut weight = Weight::zero();
if version < 4 {
v4::migrate::<T>(&mut weight);
let name = <Pallet<T>>::name();
loop {
let in_progress_version = <Pallet<T>>::on_chain_storage_version() + 1;
let state = M::pre_upgrade_step(in_progress_version)?;
let (status, w) = Self::migrate(Weight::MAX);
weight.saturating_accrue(w);
log::info!(
target: LOG_TARGET,
"{name}: Migration step {:?} weight = {}",
in_progress_version,
weight
);
M::post_upgrade_step(in_progress_version, state)?;
if matches!(status, MigrateResult::Completed) {
break
}
}
if version < 5 {
v5::migrate::<T>(&mut weight);
let name = <Pallet<T>>::name();
log::info!(target: LOG_TARGET, "{name}: Migration steps weight = {}", weight);
Ok(())
}
}
impl<T: Config, M: MigrateSequence> OnRuntimeUpgrade for Migration<T, M> {
fn on_runtime_upgrade() -> Weight {
let name = <Pallet<T>>::name();
let latest_version = <Pallet<T>>::current_storage_version();
let storage_version = <Pallet<T>>::on_chain_storage_version();
if storage_version == latest_version {
log::warn!(
target: LOG_TARGET,
"{name}: No Migration performed storage_version = latest_version = {:?}",
&storage_version
);
return T::WeightInfo::on_runtime_upgrade_noop()
}
if version < 6 {
v6::migrate::<T>(&mut weight);
// In case a migration is already in progress we create the next migration
// (if any) right when the current one finishes.
if Self::in_progress() {
log::warn!(
target: LOG_TARGET,
"{name}: Migration already in progress {:?}",
&storage_version
);
return T::WeightInfo::on_runtime_upgrade_in_progress()
}
if version < 7 {
v7::migrate::<T>(&mut weight);
}
log::info!(
target: LOG_TARGET,
"{name}: Upgrading storage from {storage_version:?} to {latest_version:?}.",
);
if version < 8 {
v8::migrate::<T>(&mut weight);
}
let cursor = M::new(storage_version + 1);
MigrationInProgress::<T>::set(Some(cursor));
if version < 9 {
v9::migrate::<T>(&mut weight);
}
#[cfg(feature = "try-runtime")]
Self::run_all_steps().unwrap();
StorageVersion::new(9).put::<Pallet<T>>();
weight.saturating_accrue(T::DbWeight::get().writes(1));
weight
return T::WeightInfo::on_runtime_upgrade()
}
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
let version = <Pallet<T>>::on_chain_storage_version();
// We can't really do much here as our migrations do not happen during the runtime upgrade.
// Instead, we call the migrations `pre_upgrade` and `post_upgrade` hooks when we iterate
// over our migrations.
let storage_version = <Pallet<T>>::on_chain_storage_version();
let target_version = <Pallet<T>>::current_storage_version();
if version == 7 {
v8::pre_upgrade::<T>()?;
log::debug!(
target: LOG_TARGET,
"{}: Range supported {:?}, range requested {:?}",
<Pallet<T>>::name(),
M::VERSION_RANGE,
(storage_version, target_version)
);
ensure!(M::is_upgrade_supported(storage_version, target_version), "Unsupported upgrade");
Ok(Default::default())
}
}
/// The result of running the migration.
#[derive(Debug, PartialEq)]
pub enum MigrateResult {
/// No migration was performed
/// (the supplied weight limit did not even cover the base cost of `migrate`).
NoMigrationPerformed,
/// No migration currently in progress
NoMigrationInProgress,
/// A migration is in progress
InProgress { steps_done: u32 },
/// All migrations are completed
Completed,
}
/// The result of running a migration step.
#[derive(Debug, PartialEq)]
pub enum StepResult {
/// The migration is not finished; `cursor` holds the encoded state to resume from.
InProgress { cursor: Cursor, steps_done: u32 },
/// The migration finished after `steps_done` further steps.
Completed { steps_done: u32 },
}
impl<T: Config, M: MigrateSequence> Migration<T, M> {
/// Verify that each migration's step of the MigrateSequence fits into `Cursor`.
pub(crate) fn integrity_test() {
// A single step must never exceed the maximum block weight.
let max_weight = <T as frame_system::Config>::BlockWeights::get().max_block;
M::integrity_test(max_weight)
}
/// Migrate
/// Return the weight used and whether or not a migration is in progress
pub(crate) fn migrate(weight_limit: Weight) -> (MigrateResult, Weight) {
let name = <Pallet<T>>::name();
let mut weight_left = weight_limit;
if weight_left.checked_reduce(T::WeightInfo::migrate()).is_none() {
return (MigrateResult::NoMigrationPerformed, Weight::zero())
}
Ok(version.encode())
}
MigrationInProgress::<T>::mutate_exists(|progress| {
let Some(cursor_before) = progress.as_mut() else {
return (MigrateResult::NoMigrationInProgress, T::WeightInfo::migration_noop())
};
#[cfg(feature = "try-runtime")]
fn post_upgrade(state: Vec<u8>) -> Result<(), TryRuntimeError> {
let version = Decode::decode(&mut state.as_ref()).map_err(|_| "Cannot decode version")?;
post_checks::post_upgrade::<T>(version)
}
}
// if a migration is running it is always upgrading to the next version
let storage_version = <Pallet<T>>::on_chain_storage_version();
let in_progress_version = storage_version + 1;
/// V4: `Schedule` is changed to be a config item rather than an in-storage value.
mod v4 {
use super::*;
log::info!(
target: LOG_TARGET,
"{name}: Migrating from {:?} to {:?},",
storage_version,
in_progress_version,
);
pub fn migrate<T: Config>(weight: &mut Weight) {
#[allow(deprecated)]
migration::remove_storage_prefix(<Pallet<T>>::name().as_bytes(), b"CurrentSchedule", b"");
weight.saturating_accrue(T::DbWeight::get().writes(1));
}
}
let result =
match M::steps(in_progress_version, cursor_before.as_ref(), &mut weight_left) {
StepResult::InProgress { cursor, steps_done } => {
*progress = Some(cursor);
MigrateResult::InProgress { steps_done }
},
StepResult::Completed { steps_done } => {
in_progress_version.put::<Pallet<T>>();
if <Pallet<T>>::current_storage_version() != in_progress_version {
log::info!(
target: LOG_TARGET,
"{name}: Next migration is {:?},",
in_progress_version + 1
);
*progress = Some(M::new(in_progress_version + 1));
MigrateResult::InProgress { steps_done }
} else {
log::info!(
target: LOG_TARGET,
"{name}: All migrations done. At version {:?},",
in_progress_version
);
*progress = None;
MigrateResult::Completed
}
},
};
/// V5: State rent is removed which obsoletes some fields in `ContractInfo`.
mod v5 {
use super::*;
type AliveContractInfo<T> =
RawAliveContractInfo<CodeHash<T>, BalanceOf<T>, <T as frame_system::Config>::BlockNumber>;
type TombstoneContractInfo<T> = RawTombstoneContractInfo<
<T as frame_system::Config>::Hash,
<T as frame_system::Config>::Hashing,
>;
#[derive(Decode)]
enum OldContractInfo<T: Config> {
Alive(AliveContractInfo<T>),
Tombstone(TombstoneContractInfo<T>),
}
#[derive(Decode)]
struct RawAliveContractInfo<CodeHash, Balance, BlockNumber> {
trie_id: TrieId,
_storage_size: u32,
_pair_count: u32,
code_hash: CodeHash,
_rent_allowance: Balance,
_rent_paid: Balance,
_deduct_block: BlockNumber,
_last_write: Option<BlockNumber>,
_reserved: Option<()>,
}
#[derive(Decode)]
struct RawTombstoneContractInfo<H, Hasher>(H, PhantomData<Hasher>);
#[derive(Decode)]
struct OldDeletedContract {
_pair_count: u32,
trie_id: TrieId,
}
pub type ContractInfo<T> = RawContractInfo<CodeHash<T>>;
#[derive(Encode, Decode)]
pub struct RawContractInfo<CodeHash> {
pub trie_id: TrieId,
pub code_hash: CodeHash,
pub _reserved: Option<()>,
}
#[derive(Encode, Decode)]
struct DeletedContract {
trie_id: TrieId,
}
#[storage_alias]
type ContractInfoOf<T: Config> = StorageMap<
Pallet<T>,
Twox64Concat,
<T as frame_system::Config>::AccountId,
ContractInfo<T>,
>;
#[storage_alias]
type DeletionQueue<T: Config> = StorageValue<Pallet<T>, Vec<DeletedContract>>;
pub fn migrate<T: Config>(weight: &mut Weight) {
<ContractInfoOf<T>>::translate(|_key, old: OldContractInfo<T>| {
weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1));
match old {
OldContractInfo::Alive(old) => Some(ContractInfo::<T> {
trie_id: old.trie_id,
code_hash: old.code_hash,
_reserved: old._reserved,
}),
OldContractInfo::Tombstone(_) => None,
}
});
DeletionQueue::<T>::translate(|old: Option<Vec<OldDeletedContract>>| {
weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1));
old.map(|old| old.into_iter().map(|o| DeletedContract { trie_id: o.trie_id }).collect())
(result, weight_limit.saturating_sub(weight_left))
})
.ok();
}
/// Fails with `MigrationInProgress` while a migration is running, `Ok` otherwise.
pub(crate) fn ensure_migrated() -> DispatchResult {
if Self::in_progress() {
Err(Error::<T>::MigrationInProgress.into())
} else {
Ok(())
}
}
/// A migration is in progress exactly while a cursor is stored in `MigrationInProgress`.
pub(crate) fn in_progress() -> bool {
MigrationInProgress::<T>::exists()
}
}
/// V6: Added storage deposits
mod v6 {
use super::*;
#[impl_trait_for_tuples::impl_for_tuples(10)]
#[tuple_types_custom_trait_bound(Migrate)]
impl MigrateSequence for Tuple {
// Fold over the tuple elements at compile time: start from the sentinel `(0, 0)`,
// seed the range with the first element's version, then require each subsequent
// element to be exactly `last_version + 1`.
const VERSION_RANGE: (u16, u16) = {
let mut versions: (u16, u16) = (0, 0);
for_tuples!(
#(
match versions {
(0, 0) => {
versions = (Tuple::VERSION, Tuple::VERSION);
},
(min_version, last_version) if Tuple::VERSION == last_version + 1 => {
versions = (min_version, Tuple::VERSION);
},
_ => panic!("Migrations must be ordered by their versions with no gaps.")
}
)*
);
versions
};
#[derive(Encode, Decode)]
struct OldPrefabWasmModule {
#[codec(compact)]
instruction_weights_version: u32,
#[codec(compact)]
initial: u32,
#[codec(compact)]
maximum: u32,
#[codec(compact)]
refcount: u64,
_reserved: Option<()>,
code: Vec<u8>,
original_code_len: u32,
}
#[derive(Encode, Decode)]
pub struct PrefabWasmModule {
#[codec(compact)]
pub instruction_weights_version: u32,
#[codec(compact)]
pub initial: u32,
#[codec(compact)]
pub maximum: u32,
pub code: Vec<u8>,
}
use v5::ContractInfo as OldContractInfo;
#[derive(Encode, Decode)]
pub struct RawContractInfo<CodeHash, Balance> {
pub trie_id: TrieId,
pub code_hash: CodeHash,
pub storage_deposit: Balance,
}
#[derive(Encode, Decode)]
pub struct OwnerInfo<T: Config> {
owner: T::AccountId,
#[codec(compact)]
deposit: BalanceOf<T>,
#[codec(compact)]
refcount: u64,
}
pub type ContractInfo<T> = RawContractInfo<CodeHash<T>, BalanceOf<T>>;
#[storage_alias]
type ContractInfoOf<T: Config> = StorageMap<
Pallet<T>,
Twox64Concat,
<T as frame_system::Config>::AccountId,
ContractInfo<T>,
>;
#[storage_alias]
type CodeStorage<T: Config> = StorageMap<Pallet<T>, Identity, CodeHash<T>, PrefabWasmModule>;
#[storage_alias]
type OwnerInfoOf<T: Config> = StorageMap<Pallet<T>, Identity, CodeHash<T>, OwnerInfo<T>>;
pub fn migrate<T: Config>(weight: &mut Weight) {
<ContractInfoOf<T>>::translate(|_key, old: OldContractInfo<T>| {
weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1));
Some(ContractInfo::<T> {
trie_id: old.trie_id,
code_hash: old.code_hash,
storage_deposit: Default::default(),
})
});
let nobody = T::AccountId::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes())
.expect("Infinite input; no dead input space; qed");
<CodeStorage<T>>::translate(|key, old: OldPrefabWasmModule| {
weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2));
<OwnerInfoOf<T>>::insert(
key,
OwnerInfo {
refcount: old.refcount,
owner: nobody.clone(),
deposit: Default::default(),
},
);
Some(PrefabWasmModule {
instruction_weights_version: old.instruction_weights_version,
initial: old.initial,
maximum: old.maximum,
code: old.code,
})
});
}
}
/// Rename `AccountCounter` to `Nonce`.
mod v7 {
use super::*;
pub fn migrate<T: Config>(weight: &mut Weight) {
#[storage_alias]
type AccountCounter<T: Config> = StorageValue<Pallet<T>, u64, ValueQuery>;
#[storage_alias]
type Nonce<T: Config> = StorageValue<Pallet<T>, u64, ValueQuery>;
Nonce::<T>::set(AccountCounter::<T>::take());
weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2))
}
}
/// Update `ContractInfo` with new fields that track storage deposits.
mod v8 {
use super::*;
use sp_io::default_child_storage as child;
use v6::ContractInfo as OldContractInfo;
#[derive(Encode, Decode)]
pub struct ContractInfo<T: Config> {
pub trie_id: TrieId,
pub code_hash: CodeHash<T>,
pub storage_bytes: u32,
pub storage_items: u32,
pub storage_byte_deposit: BalanceOf<T>,
pub storage_item_deposit: BalanceOf<T>,
pub storage_base_deposit: BalanceOf<T>,
}
#[storage_alias]
type ContractInfoOf<T: Config, V> =
StorageMap<Pallet<T>, Twox64Concat, <T as frame_system::Config>::AccountId, V>;
pub fn migrate<T: Config>(weight: &mut Weight) {
<ContractInfoOf<T, ContractInfo<T>>>::translate_values(|old: OldContractInfo<T>| {
// Count storage items of this contract
let mut storage_bytes = 0u32;
let mut storage_items = 0u32;
let mut key = Vec::new();
while let Some(next) = child::next_key(&old.trie_id, &key) {
key = next;
let mut val_out = [];
let len = child::read(&old.trie_id, &key, &mut val_out, 0)
.expect("The loop conditions checks for existence of the key; qed");
storage_bytes.saturating_accrue(len);
storage_items.saturating_accrue(1);
}
let storage_byte_deposit =
T::DepositPerByte::get().saturating_mul(storage_bytes.into());
let storage_item_deposit =
T::DepositPerItem::get().saturating_mul(storage_items.into());
let storage_base_deposit = old
.storage_deposit
.saturating_sub(storage_byte_deposit)
.saturating_sub(storage_item_deposit);
// Reads: One read for each storage item plus the contract info itself.
// Writes: Only the new contract info.
weight.saturating_accrue(
T::DbWeight::get().reads_writes(u64::from(storage_items) + 1, 1),
);
Some(ContractInfo {
trie_id: old.trie_id,
code_hash: old.code_hash,
storage_bytes,
storage_items,
storage_byte_deposit,
storage_item_deposit,
storage_base_deposit,
})
});
/// Build the initial cursor for `version`: the matching migration's `Default`
/// state, SCALE-encoded into the bounded `Cursor`.
fn new(version: StorageVersion) -> Cursor {
for_tuples!(
#(
if version == Tuple::VERSION {
// Fits by construction: checked in `integrity_test`.
return Tuple::default().encode().try_into().expect(PROOF_ENCODE)
}
)*
);
invalid_version(version)
}
#[cfg(feature = "try-runtime")]
pub fn pre_upgrade<T: Config>() -> Result<(), TryRuntimeError> {
use frame_support::traits::ReservableCurrency;
for (key, value) in ContractInfoOf::<T, OldContractInfo<T>>::iter() {
let reserved = T::Currency::reserved_balance(&key);
ensure!(reserved >= value.storage_deposit, "Reserved balance out of sync.");
}
Ok(())
/// Execute the pre-checks of the step associated with this version.
///
/// Dispatches to the tuple element whose `VERSION` matches; panics for unknown versions.
fn pre_upgrade_step(version: StorageVersion) -> Result<Vec<u8>, TryRuntimeError> {
for_tuples!(
#(
if version == Tuple::VERSION {
return Tuple::pre_upgrade_step()
}
)*
);
invalid_version(version)
}
#[cfg(feature = "try-runtime")]
/// Execute the post-checks of the step associated with this version.
///
/// `state` is the blob previously returned by the matching `pre_upgrade_step`.
fn post_upgrade_step(version: StorageVersion, state: Vec<u8>) -> Result<(), TryRuntimeError> {
for_tuples!(
#(
if version == Tuple::VERSION {
return Tuple::post_upgrade_step(state)
}
)*
);
invalid_version(version)
}
/// Run as many steps of the migration matching `version` as `weight_left` allows.
///
/// The migration state is decoded from `cursor`, stepped repeatedly, and either
/// reported as `Completed` or re-encoded into a new cursor for the next call.
fn steps(version: StorageVersion, mut cursor: &[u8], weight_left: &mut Weight) -> StepResult {
for_tuples!(
#(
if version == Tuple::VERSION {
// Decoding cannot fail: the cursor was encoded from this very type.
let mut migration = <Tuple as Decode>::decode(&mut cursor)
.expect(PROOF_DECODE);
let max_weight = Tuple::max_step_weight();
let mut steps_done = 0;
// Only take a step if the worst-case step weight still fits; this means
// a too-small `weight_left` yields `InProgress` with `steps_done == 0`.
while weight_left.all_gt(max_weight) {
let (finished, weight) = migration.step();
steps_done += 1;
// Charge the actual (not worst-case) weight consumed by the step.
weight_left.saturating_reduce(weight);
if matches!(finished, IsFinished::Yes) {
return StepResult::Completed{ steps_done }
}
}
return StepResult::InProgress{cursor: migration.encode().try_into().expect(PROOF_ENCODE), steps_done }
}
)*
);
invalid_version(version)
}
/// Run the integrity test of every migration in the sequence.
fn integrity_test(max_block_weight: Weight) {
for_tuples!(
#(
Tuple::integrity_test(max_block_weight);
)*
);
}
}
/// Update `CodeStorage` with the new `determinism` field.
mod v9 {
#[cfg(test)]
mod test {
use super::*;
use crate::Determinism;
use v6::PrefabWasmModule as OldPrefabWasmModule;
use crate::tests::{ExtBuilder, Test};
#[derive(Encode, Decode)]
pub struct PrefabWasmModule {
#[codec(compact)]
pub instruction_weights_version: u32,
#[codec(compact)]
pub initial: u32,
#[codec(compact)]
pub maximum: u32,
pub code: Vec<u8>,
pub determinism: Determinism,
// Test double for `Migrate`: `MockMigration<N>` needs `N` calls to `step()` to finish,
// each consuming `Weight::from_all(1)`.
#[derive(Default, Encode, Decode, MaxEncodedLen)]
struct MockMigration<const N: u16> {
// Number of steps executed so far; finished once it reaches `N`.
count: u16,
}
#[storage_alias]
type CodeStorage<T: Config> = StorageMap<Pallet<T>, Identity, CodeHash<T>, PrefabWasmModule>;
impl<const N: u16> Migrate for MockMigration<N> {
	const VERSION: u16 = N;

	/// Every mock step costs exactly one unit of each weight dimension.
	fn max_step_weight() -> Weight {
		Weight::from_all(1)
	}

	/// Advance the step counter; report `Yes` once `N` steps have run.
	fn step(&mut self) -> (IsFinished, Weight) {
		// Stepping a finished migration is a bug in the test harness.
		assert!(self.count != N);
		self.count += 1;
		let finished = if self.count == N { IsFinished::Yes } else { IsFinished::No };
		(finished, Weight::from_all(1))
	}
}
pub fn migrate<T: Config>(weight: &mut Weight) {
<CodeStorage<T>>::translate_values(|old: OldPrefabWasmModule| {
weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1));
Some(PrefabWasmModule {
instruction_weights_version: old.instruction_weights_version,
initial: old.initial,
maximum: old.maximum,
code: old.code,
determinism: Determinism::Enforced,
})
#[test]
fn version_range_works() {
	// A gapless tuple yields (first version, last version).
	assert_eq!(<(MockMigration<1>, MockMigration<2>)>::VERSION_RANGE, (1, 2));
}
#[test]
fn is_upgrade_supported_works() {
	type M = (MockMigration<9>, MockMigration<10>, MockMigration<11>);

	// Supported: no-op upgrades and anything starting at version 8 or later
	// that targets the sequence's last version (11).
	for (from, to) in [(1, 1), (8, 11), (9, 11)] {
		assert!(
			M::is_upgrade_supported(StorageVersion::new(from), StorageVersion::new(to)),
			"{} -> {} is supported",
			from,
			to
		)
	}

	// Unsupported: downgrades, starts below version 8, or targets other than 11.
	for (from, to) in [(1, 0), (0, 3), (7, 11), (8, 10)] {
		assert!(
			!M::is_upgrade_supported(StorageVersion::new(from), StorageVersion::new(to)),
			"{} -> {} is not supported",
			from,
			to
		)
	}
}
#[test]
fn steps_works() {
type M = (MockMigration<2>, MockMigration<3>);
let version = StorageVersion::new(2);
let mut cursor = M::new(version);
// Enough weight for exactly one step (each step costs 1, and the loop requires
// `weight_left` to be strictly greater than the max step weight).
let mut weight = Weight::from_all(2);
let result = M::steps(version, &cursor, &mut weight);
// Expected cursor after one step: SCALE encoding of `MockMigration::<2> { count: 1 }`.
cursor = vec![1u8, 0].try_into().unwrap();
assert_eq!(result, StepResult::InProgress { cursor: cursor.clone(), steps_done: 1 });
assert_eq!(weight, Weight::from_all(1));
// Resuming from the stored cursor finishes the migration with one more step.
let mut weight = Weight::from_all(2);
assert_eq!(
M::steps(version, &cursor, &mut weight),
StepResult::Completed { steps_done: 1 }
);
}
#[test]
fn no_migration_in_progress_works() {
type M = (MockMigration<1>, MockMigration<2>);
type TestMigration = Migration<Test, M>;
ExtBuilder::default().build().execute_with(|| {
// Default storage is already at the latest version, so no cursor exists
// and `migrate` reports that nothing is in progress.
assert_eq!(StorageVersion::get::<Pallet<Test>>(), 2);
assert_eq!(TestMigration::migrate(Weight::MAX).0, MigrateResult::NoMigrationInProgress)
});
}
#[test]
fn migration_works() {
type M = (MockMigration<1>, MockMigration<2>);
type TestMigration = Migration<Test, M>;
// Start from storage version 0 so both migrations (to v1 and v2) must run.
ExtBuilder::default().set_storage_version(0).build().execute_with(|| {
assert_eq!(StorageVersion::get::<Pallet<Test>>(), 0);
// Sets up the cursor; the actual steps run via subsequent `migrate` calls.
TestMigration::on_runtime_upgrade();
for (version, status) in
[(1, MigrateResult::InProgress { steps_done: 1 }), (2, MigrateResult::Completed)]
{
// Each call performs one migration's steps and bumps the on-chain version.
assert_eq!(TestMigration::migrate(Weight::MAX).0, status);
assert_eq!(
<Pallet<Test>>::on_chain_storage_version(),
StorageVersion::new(version)
);
}
// Once complete, further calls report no migration in progress.
assert_eq!(TestMigration::migrate(Weight::MAX).0, MigrateResult::NoMigrationInProgress);
assert_eq!(StorageVersion::get::<Pallet<Test>>(), 2);
});
}
}
// Post checks always need to be run against the latest storage version. This is why we
// do not scope them in the per version modules. They always need to be ported to the latest
// version.
#[cfg(feature = "try-runtime")]
mod post_checks {
use super::*;
use crate::Determinism;
use sp_io::default_child_storage as child;
use v8::ContractInfo;
use v9::PrefabWasmModule;
#[storage_alias]
type CodeStorage<T: Config> = StorageMap<Pallet<T>, Identity, CodeHash<T>, PrefabWasmModule>;
#[storage_alias]
type ContractInfoOf<T: Config, V> =
StorageMap<Pallet<T>, Twox64Concat, <T as frame_system::Config>::AccountId, V>;
pub fn post_upgrade<T: Config>(old_version: StorageVersion) -> Result<(), TryRuntimeError> {
if old_version < 7 {
return Ok(())
}
if old_version < 8 {
v8::<T>()?;
}
if old_version < 9 {
v9::<T>()?;
}
Ok(())
}
fn v8<T: Config>() -> Result<(), TryRuntimeError> {
use frame_support::traits::ReservableCurrency;
for (key, value) in ContractInfoOf::<T, ContractInfo<T>>::iter() {
let reserved = T::Currency::reserved_balance(&key);
let stored = value
.storage_base_deposit
.saturating_add(value.storage_byte_deposit)
.saturating_add(value.storage_item_deposit);
ensure!(reserved >= stored, "Reserved balance out of sync.");
let mut storage_bytes = 0u32;
let mut storage_items = 0u32;
let mut key = Vec::new();
while let Some(next) = child::next_key(&value.trie_id, &key) {
key = next;
let mut val_out = [];
let len = child::read(&value.trie_id, &key, &mut val_out, 0)
.expect("The loop conditions checks for existence of the key; qed");
storage_bytes.saturating_accrue(len);
storage_items.saturating_accrue(1);
}
ensure!(storage_bytes == value.storage_bytes, "Storage bytes do not match.");
ensure!(storage_items == value.storage_items, "Storage items do not match.");
}
Ok(())
}
fn v9<T: Config>() -> Result<(), TryRuntimeError> {
for value in CodeStorage::<T>::iter_values() {
ensure!(
value.determinism == Determinism::Enforced,
"All pre-existing codes need to be deterministic."
);
}
Ok(())
}
}