contracts: Multi block migrations (#14045)

* Frame Add translate_next

This works similarly to `translate` but only translates a single entry.
This function will be useful in the context of multi-block migration.

* Move to lazy migration

* Updates

* simplify MockMigration

* wip

* wip

* add bench

* add bench

* fmt

* fix bench

* add .

* ".git/.scripts/commands/bench/bench.sh" pallet dev pallet_contracts

* Apply suggestions from code review

Co-authored-by: Alexander Theißen <alex.theissen@me.com>

* Scaffold v10 / v11 fix tests

* PR comment

* tweak pub use

* wip

* wip

* wip

* misc merge master

* misc merge master

* wip

* rm tmp stuff

* wip

* wip

* wip

* wip

* wip

* fixes

* add state

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* wip

* fix

* fixed compilation

* clean up logs

* wip

* Revert "Frame Add translate_next"

This reverts commit 10318fc95c42b1f7f25efeb35e6d947ea02bed88.

* Fix v10 logic

* Apply suggestions from code review

Co-authored-by: Alexander Theißen <alex.theissen@me.com>

* wip

* fixes

* exercise del_queue

* bump sample size

* fmt

* wip

* blank line

* fix lint

* fix rustdoc job lint

* PR comment do not use dangerous into()

* Add macros for updating mod visibility

* Add doc

* Add max_weight to integrity_test

* fix compilation

* Add no migration tests

* ".git/.scripts/commands/bench/bench.sh" pallet dev pallet_contracts

* fix clippy

* PR review

* Update frame/contracts/src/lib.rs

Co-authored-by: Sasha Gryaznov <hi@agryaznov.com>

* Fix master merge

* fix merge 2

* fix tryruntime

* fix lint

---------

Co-authored-by: Alexander Theißen <alex.theissen@me.com>
Co-authored-by: command-bot <>
Co-authored-by: Sasha Gryaznov <hi@agryaznov.com>
This commit is contained in:
PG Herveou
2023-05-31 16:19:31 +02:00
committed by GitHub
parent b43a1b0b55
commit c7c5fc709c
10 changed files with 2841 additions and 1482 deletions
@@ -21,7 +21,6 @@
mod code;
mod sandbox;
use self::{
code::{
body::{self, DynInstr::*},
@@ -31,12 +30,13 @@ use self::{
};
use crate::{
exec::{AccountIdOf, Key},
migration::{v10, v11, v9, Migrate},
wasm::CallFlags,
Pallet as Contracts, *,
};
use codec::{Encode, MaxEncodedLen};
use frame_benchmarking::v1::{account, benchmarks, whitelisted_caller};
use frame_support::weights::Weight;
use frame_support::{pallet_prelude::StorageVersion, weights::Weight};
use frame_system::RawOrigin;
use sp_runtime::{
traits::{Bounded, Hash},
@@ -234,6 +234,94 @@ benchmarks! {
Contracts::<T>::reinstrument_module(&mut module, &schedule)?;
}
// This benchmarks the v9 migration step. (update codeStorage)
#[pov_mode = Measured]
v9_migration_step {
let c in 0 .. Perbill::from_percent(49).mul_ceil(T::MaxCodeLen::get());
v9::store_old_dummy_code::<T>(c as usize);
let mut m = v9::Migration::<T>::default();
}: {
m.step();
}
// This benchmarks the v10 migration step. (use dedicated deposit_account)
#[pov_mode = Measured]
v10_migration_step {
let contract = <Contract<T>>::with_caller(
whitelisted_caller(), WasmModule::dummy(), vec![],
)?;
v10::store_old_contrat_info::<T>(contract.account_id.clone(), contract.info()?);
let mut m = v10::Migration::<T>::default();
}: {
m.step();
}
// This benchmarks the v11 migration step.
#[pov_mode = Measured]
v11_migration_step {
let k in 0 .. 1024;
v11::fill_old_queue::<T>(k as usize);
let mut m = v11::Migration::<T>::default();
}: {
m.step();
}
// This benchmarks the weight of executing Migration::migrate to execute a noop migration.
#[pov_mode = Measured]
migration_noop {
assert_eq!(StorageVersion::get::<Pallet<T>>(), 2);
}: {
Migration::<T>::migrate(Weight::MAX)
} verify {
assert_eq!(StorageVersion::get::<Pallet<T>>(), 2);
}
// This benchmarks the weight of executing Migration::migrate when there is no migration in progress.
#[pov_mode = Measured]
migrate {
StorageVersion::new(0).put::<Pallet<T>>();
<Migration::<T> as frame_support::traits::OnRuntimeUpgrade>::on_runtime_upgrade();
let origin: RawOrigin<<T as frame_system::Config>::AccountId> = RawOrigin::Signed(whitelisted_caller());
}: {
<Contracts<T>>::migrate(origin.into(), Weight::MAX).unwrap()
} verify {
assert_eq!(StorageVersion::get::<Pallet<T>>(), 1);
}
// This benchmarks the weight of running on_runtime_upgrade when there is no migration in progress.
#[pov_mode = Measured]
on_runtime_upgrade_noop {
assert_eq!(StorageVersion::get::<Pallet<T>>(), 2);
}: {
<Migration::<T> as frame_support::traits::OnRuntimeUpgrade>::on_runtime_upgrade()
} verify {
assert!(MigrationInProgress::<T>::get().is_none());
}
// This benchmarks the weight of running on_runtime_upgrade when there is a migration in progress.
#[pov_mode = Measured]
on_runtime_upgrade_in_progress {
StorageVersion::new(0).put::<Pallet<T>>();
let v = vec![42u8].try_into().ok();
MigrationInProgress::<T>::set(v.clone());
}: {
<Migration::<T> as frame_support::traits::OnRuntimeUpgrade>::on_runtime_upgrade()
} verify {
assert!(MigrationInProgress::<T>::get().is_some());
assert_eq!(MigrationInProgress::<T>::get(), v);
}
// This benchmarks the weight of running on_runtime_upgrade when there is a migration to process.
#[pov_mode = Measured]
on_runtime_upgrade {
StorageVersion::new(0).put::<Pallet<T>>();
}: {
<Migration::<T> as frame_support::traits::OnRuntimeUpgrade>::on_runtime_upgrade()
} verify {
assert!(MigrationInProgress::<T>::get().is_some());
}
// This benchmarks the overhead of loading a code of size `c` byte from storage and into
// the sandbox. This does **not** include the actual execution for which the gas meter
// is responsible. This is achieved by generating all code to the `deploy` function
+89 -6
View File
@@ -107,7 +107,10 @@ use crate::{
use codec::{Codec, Decode, Encode, HasCompact};
use environmental::*;
use frame_support::{
dispatch::{DispatchError, Dispatchable, GetDispatchInfo, Pays, PostDispatchInfo, RawOrigin},
dispatch::{
DispatchError, Dispatchable, GetDispatchInfo, Pays, PostDispatchInfo, RawOrigin,
WithPostDispatchInfo,
},
ensure,
error::BadOrigin,
traits::{
@@ -120,18 +123,18 @@ use frame_support::{
use frame_system::{ensure_signed, pallet_prelude::OriginFor, EventRecord, Pallet as System};
use pallet_contracts_primitives::{
Code, CodeUploadResult, CodeUploadReturnValue, ContractAccessError, ContractExecResult,
ContractInstantiateResult, ExecReturnValue, GetStorageResult, InstantiateReturnValue,
StorageDeposit,
ContractInstantiateResult, ContractResult, ExecReturnValue, GetStorageResult,
InstantiateReturnValue, StorageDeposit,
};
use scale_info::TypeInfo;
use smallvec::Array;
use sp_runtime::traits::{Convert, Hash, Saturating, StaticLookup};
use sp_runtime::traits::{Convert, Hash, Saturating, StaticLookup, Zero};
use sp_std::{fmt::Debug, marker::PhantomData, prelude::*};
pub use crate::{
address::{AddressGenerator, DefaultAddressGenerator},
exec::Frame,
migration::Migration,
migration::{MigrateSequence, Migration, NoopMigration},
pallet::*,
schedule::{HostFnWeights, InstructionWeights, Limits, Schedule},
wasm::Determinism,
@@ -179,7 +182,12 @@ pub mod pallet {
use frame_system::pallet_prelude::*;
/// The current storage version.
const STORAGE_VERSION: StorageVersion = StorageVersion::new(9);
#[cfg(not(any(test, feature = "runtime-benchmarks")))]
const STORAGE_VERSION: StorageVersion = StorageVersion::new(11);
/// Hard coded storage version for running tests that depend on the current storage version.
#[cfg(any(test, feature = "runtime-benchmarks"))]
const STORAGE_VERSION: StorageVersion = StorageVersion::new(2);
#[pallet::pallet]
#[pallet::storage_version(STORAGE_VERSION)]
@@ -316,11 +324,22 @@ pub mod pallet {
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
fn on_idle(_block: T::BlockNumber, remaining_weight: Weight) -> Weight {
use migration::MigrateResult::*;
let (result, weight) = Migration::<T>::migrate(remaining_weight);
let remaining_weight = remaining_weight.saturating_sub(weight);
if !matches!(result, Completed | NoMigrationInProgress) {
return weight
}
ContractInfo::<T>::process_deletion_queue_batch(remaining_weight)
.saturating_add(T::WeightInfo::on_process_deletion_queue_batch())
}
fn integrity_test() {
Migration::<T>::integrity_test();
// Total runtime memory limit
let max_runtime_mem: u32 = T::Schedule::get().limits.runtime_memory;
// Memory limits for a single contract:
@@ -499,6 +518,7 @@ pub mod pallet {
storage_deposit_limit: Option<<BalanceOf<T> as codec::HasCompact>::Type>,
determinism: Determinism,
) -> DispatchResult {
Migration::<T>::ensure_migrated()?;
let origin = ensure_signed(origin)?;
Self::bare_upload_code(origin, code, storage_deposit_limit.map(Into::into), determinism)
.map(|_| ())
@@ -514,6 +534,7 @@ pub mod pallet {
origin: OriginFor<T>,
code_hash: CodeHash<T>,
) -> DispatchResultWithPostInfo {
Migration::<T>::ensure_migrated()?;
let origin = ensure_signed(origin)?;
<PrefabWasmModule<T>>::remove(&origin, code_hash)?;
// we waive the fee because removing unused code is beneficial
@@ -537,6 +558,7 @@ pub mod pallet {
dest: AccountIdLookupOf<T>,
code_hash: CodeHash<T>,
) -> DispatchResult {
Migration::<T>::ensure_migrated()?;
ensure_root(origin)?;
let dest = T::Lookup::lookup(dest)?;
<ContractInfoOf<T>>::try_mutate(&dest, |contract| {
@@ -586,6 +608,7 @@ pub mod pallet {
storage_deposit_limit: Option<<BalanceOf<T> as codec::HasCompact>::Type>,
data: Vec<u8>,
) -> DispatchResultWithPostInfo {
Migration::<T>::ensure_migrated()?;
let common = CommonInput {
origin: Origin::from_runtime_origin(origin)?,
value,
@@ -645,6 +668,7 @@ pub mod pallet {
data: Vec<u8>,
salt: Vec<u8>,
) -> DispatchResultWithPostInfo {
Migration::<T>::ensure_migrated()?;
let code_len = code.len() as u32;
let data_len = data.len() as u32;
let salt_len = salt.len() as u32;
@@ -687,6 +711,7 @@ pub mod pallet {
data: Vec<u8>,
salt: Vec<u8>,
) -> DispatchResultWithPostInfo {
Migration::<T>::ensure_migrated()?;
let data_len = data.len() as u32;
let salt_len = salt.len() as u32;
let common = CommonInput {
@@ -709,6 +734,33 @@ pub mod pallet {
T::WeightInfo::instantiate(data_len, salt_len),
)
}
/// When a migration is in progress, this dispatchable can be used to run migration steps.
/// Calls that contribute to advancing the migration have their fees waived, as it's helpful
/// for the chain. Note that while the migration is in progress, the pallet will also
/// leverage the `on_idle` hooks to run migration steps.
#[pallet::call_index(9)]
#[pallet::weight(T::WeightInfo::migrate().saturating_add(*weight_limit))]
pub fn migrate(origin: OriginFor<T>, weight_limit: Weight) -> DispatchResultWithPostInfo {
use migration::MigrateResult::*;
ensure_signed(origin)?;
let weight_limit = weight_limit.saturating_add(T::WeightInfo::migrate());
let (result, weight) = Migration::<T>::migrate(weight_limit);
match result {
Completed =>
Ok(PostDispatchInfo { actual_weight: Some(weight), pays_fee: Pays::No }),
InProgress { steps_done, .. } if steps_done > 0 =>
Ok(PostDispatchInfo { actual_weight: Some(weight), pays_fee: Pays::No }),
InProgress { .. } =>
Ok(PostDispatchInfo { actual_weight: Some(weight), pays_fee: Pays::Yes }),
NoMigrationInProgress | NoMigrationPerformed => {
let err: DispatchError = <Error<T>>::NoMigrationPerformed.into();
Err(err.with_weight(T::WeightInfo::migrate()))
},
}
}
}
#[pallet::event]
@@ -861,6 +913,10 @@ pub mod pallet {
CodeRejected,
/// An indeterministic code was used in a context where this is not permitted.
Indeterministic,
/// A pending migration needs to complete before the extrinsic can be called.
MigrationInProgress,
/// Migrate dispatch call was attempted but no migration was performed.
NoMigrationPerformed,
}
/// A mapping from an original code hash to the original code, untouched by instrumentation.
@@ -920,6 +976,10 @@ pub mod pallet {
#[pallet::storage]
pub(crate) type DeletionQueueCounter<T: Config> =
StorageValue<_, DeletionQueueManager<T>, ValueQuery>;
#[pallet::storage]
pub(crate) type MigrationInProgress<T: Config> =
StorageValue<_, migration::Cursor, OptionQuery>;
}
/// The type of origins supported by the contracts pallet.
@@ -1210,6 +1270,21 @@ impl<T: Config> Invokable<T> for InstantiateInput<T> {
}
}
/// Early-return from the enclosing function with an empty `ContractResult`
/// carrying `Error::MigrationInProgress` when a multi-block migration is
/// still running.
///
/// NOTE(review): the expansion contains a bare `return`, so this macro may
/// only be invoked inside functions whose return type has the
/// `ContractResult` field layout used below — confirm at each call site.
macro_rules! ensure_no_migration_in_progress {
() => {
// Check the `MigrationInProgress` storage flag via the `Migration` helper.
if Migration::<T>::in_progress() {
return ContractResult {
// Nothing was executed, so no gas was consumed or required.
gas_consumed: Zero::zero(),
gas_required: Zero::zero(),
storage_deposit: Default::default(),
debug_message: Vec::new(),
// Surface the migration state as a dispatch error to the caller.
result: Err(Error::<T>::MigrationInProgress.into()),
events: None,
}
}
};
}
impl<T: Config> Pallet<T> {
/// Perform a call to a specified contract.
///
@@ -1234,6 +1309,8 @@ impl<T: Config> Pallet<T> {
collect_events: CollectEvents,
determinism: Determinism,
) -> ContractExecResult<BalanceOf<T>, EventRecordOf<T>> {
ensure_no_migration_in_progress!();
let mut debug_message = if matches!(debug, DebugInfo::UnsafeDebug) {
Some(DebugBufferVec::<T>::default())
} else {
@@ -1290,6 +1367,8 @@ impl<T: Config> Pallet<T> {
debug: DebugInfo,
collect_events: CollectEvents,
) -> ContractInstantiateResult<T::AccountId, BalanceOf<T>, EventRecordOf<T>> {
ensure_no_migration_in_progress!();
let mut debug_message = if debug == DebugInfo::UnsafeDebug {
Some(DebugBufferVec::<T>::default())
} else {
@@ -1333,6 +1412,7 @@ impl<T: Config> Pallet<T> {
storage_deposit_limit: Option<BalanceOf<T>>,
determinism: Determinism,
) -> CodeUploadResult<CodeHash<T>, BalanceOf<T>> {
Migration::<T>::ensure_migrated()?;
let schedule = T::Schedule::get();
let module = PrefabWasmModule::from_code(
code,
@@ -1353,6 +1433,9 @@ impl<T: Config> Pallet<T> {
/// Query storage of a specified contract under a specified key.
pub fn get_storage(address: T::AccountId, key: Vec<u8>) -> GetStorageResult {
if Migration::<T>::in_progress() {
return Err(ContractAccessError::MigrationInProgress)
}
let contract_info =
ContractInfoOf::<T>::get(&address).ok_or(ContractAccessError::DoesntExist)?;
+537 -410
View File
@@ -15,462 +15,589 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{BalanceOf, CodeHash, Config, Pallet, TrieId, Weight};
use codec::{Decode, Encode};
//! Migration framework for pallets.
/// Macro to include all migration modules.
/// We only want to make these modules public when `runtime-benchmarks` is
/// enabled, so we can access migration code in benchmarks.
///
/// Each listed module is declared twice, gated on the feature flag: `pub` for
/// benchmarking builds, private otherwise. The module contents are identical.
macro_rules! use_modules {
($($module:ident),*) => {
$(
// Benchmarks need to call into migration internals (e.g. fixture setup).
#[cfg(feature = "runtime-benchmarks")]
pub mod $module;
// In production builds the migration modules stay an implementation detail.
#[cfg(not(feature = "runtime-benchmarks"))]
mod $module;
)*
};
}
// Declare the per-version migration modules handled by this framework.
use_modules!(v9, v10, v11);
use crate::{weights::WeightInfo, Config, Error, MigrationInProgress, Pallet, Weight, LOG_TARGET};
use codec::{Codec, Decode};
use frame_support::{
codec,
pallet_prelude::*,
storage::migration,
storage_alias,
traits::{Get, OnRuntimeUpgrade},
Identity, Twox64Concat,
traits::{ConstU32, OnRuntimeUpgrade},
};
use sp_runtime::traits::Saturating;
use sp_std::{marker::PhantomData, prelude::*};
use sp_std::marker::PhantomData;
#[cfg(feature = "try-runtime")]
use sp_std::prelude::*;
#[cfg(feature = "try-runtime")]
use sp_runtime::TryRuntimeError;
// Proof string used with `expect` when encoding a migration state into a `Cursor`;
// the size invariant it cites is enforced by `Migrate::integrity_test`.
const PROOF_ENCODE: &str = "Tuple::max_encoded_len() < Cursor::max_encoded_len()` is verified in `Self::integrity_test()`; qed";
// Proof string used with `expect` when decoding a `Cursor` back into a migration state.
const PROOF_DECODE: &str =
"We encode to the same type in this trait only. No other code touches this item; qed";
/// Abort with a panic when the on-chain storage requires a migration step
/// that this runtime's `MigrateSequence` does not provide.
fn invalid_version(version: StorageVersion) -> ! {
panic!("Required migration {version:?} not supported by this runtime. This is a bug.");
}
/// The cursor used to store the state of the current migration step.
pub type Cursor = BoundedVec<u8, ConstU32<1024>>;
// In benchmark and tests we use noop migrations, to test and bench the migration framework itself.
#[cfg(not(any(feature = "runtime-benchmarks", test)))]
type Migrations<T> = (v9::Migration<T>, v10::Migration<T>, v11::Migration<T>);
/// IsFinished describes whether a migration is finished or not.
pub enum IsFinished {
/// The migration is complete; `step()` does not need to be called again.
Yes,
/// More work remains; `step()` must be called again with the updated state.
No,
}
/// A trait that allows to migrate storage from one version to another.
///
/// The migration is done in steps. The migration is finished when
/// `step()` returns `IsFinished::Yes`.
///
/// Implementors are `Codec + MaxEncodedLen` because the in-flight state is
/// serialized into the fixed-size `Cursor` between blocks, and `Default`
/// because a fresh state is created when a new version's migration starts.
pub trait Migrate: Codec + MaxEncodedLen + Default {
/// Returns the version of the migration.
const VERSION: u16;
/// Returns the maximum weight that can be consumed in a single step.
fn max_step_weight() -> Weight;
/// Process one step of the migration.
///
/// Returns whether the migration is finished and the weight consumed.
fn step(&mut self) -> (IsFinished, Weight);
/// Verify that the migration step fits into `Cursor`, and that `max_step_weight` is not greater
/// than `max_block_weight`.
fn integrity_test(max_block_weight: Weight) {
// A single step heavier than a full block could never be executed.
if Self::max_step_weight().any_gt(max_block_weight) {
panic!(
"Invalid max_step_weight for Migration {}. Value should be lower than {}",
Self::VERSION,
max_block_weight
);
}
// The serialized state must fit into the bounded `Cursor` buffer;
// this is what makes the `PROOF_ENCODE` expectation sound.
let len = <Self as MaxEncodedLen>::max_encoded_len();
let max = Cursor::bound();
if len > max {
panic!(
"Migration {} has size {} which is bigger than the maximum of {}",
Self::VERSION,
len,
max,
);
}
}
/// Execute some pre-checks prior to running the first step of this migration.
#[cfg(feature = "try-runtime")]
fn pre_upgrade_step() -> Result<Vec<u8>, TryRuntimeError> {
Ok(Vec::new())
}
/// Execute some post-checks after running the last step of this migration.
#[cfg(feature = "try-runtime")]
fn post_upgrade_step(_state: Vec<u8>) -> Result<(), TryRuntimeError> {
Ok(())
}
}
/// A noop migration that can be used when there is no migration to be done for a given version.
///
/// Used by tests and benchmarks to exercise the migration framework itself
/// without touching any real storage.
#[doc(hidden)]
#[derive(frame_support::DefaultNoBound, Encode, Decode, MaxEncodedLen)]
pub struct NoopMigration<const N: u16>;
impl<const N: u16> Migrate for NoopMigration<N> {
/// The storage version this (empty) migration upgrades to.
const VERSION: u16 = N;
fn max_step_weight() -> Weight {
// Doing nothing consumes no weight.
Weight::zero()
}
fn step(&mut self) -> (IsFinished, Weight) {
log::debug!(target: LOG_TARGET, "Noop migration for version {}", N);
// Finished immediately: there is nothing to migrate.
(IsFinished::Yes, Weight::zero())
}
}
// Sealed-trait pattern: `MigrateSequence` requires `private::Sealed`, and
// `Sealed` is only implemented here for tuples of `Migrate` types, so
// downstream code cannot implement `MigrateSequence` for arbitrary types.
mod private {
use crate::migration::Migrate;
pub trait Sealed {}
#[impl_trait_for_tuples::impl_for_tuples(10)]
#[tuple_types_custom_trait_bound(Migrate)]
impl Sealed for Tuple {}
}
/// Defines a sequence of migrations.
///
/// The sequence must be defined by a tuple of migrations, each of which must implement the
/// `Migrate` trait. Migrations must be ordered by their versions with no gaps.
pub trait MigrateSequence: private::Sealed {
/// Returns the range of versions that this migration can handle.
/// Migrations must be ordered by their versions with no gaps.
///
/// The following code will fail to compile:
/// ```compile_fail
/// # use pallet_contracts::{NoopMigration, MigrateSequence};
/// let _ = <(NoopMigration<1>, NoopMigration<3>)>::VERSION_RANGE;
/// ```
/// The following code will compile:
/// ```
/// # use pallet_contracts::{NoopMigration, MigrateSequence};
/// let _ = <(NoopMigration<1>, NoopMigration<2>)>::VERSION_RANGE;
/// ```
const VERSION_RANGE: (u16, u16);
/// Returns the default cursor for the given version.
fn new(version: StorageVersion) -> Cursor;
/// Run the pre-upgrade checks of the migration targeting `_version`.
#[cfg(feature = "try-runtime")]
fn pre_upgrade_step(_version: StorageVersion) -> Result<Vec<u8>, TryRuntimeError> {
Ok(Vec::new())
}
/// Run the post-upgrade checks of the migration targeting `_version`,
/// consuming the state produced by the matching `pre_upgrade_step`.
#[cfg(feature = "try-runtime")]
fn post_upgrade_step(_version: StorageVersion, _state: Vec<u8>) -> Result<(), TryRuntimeError> {
Ok(())
}
/// Execute the migration step until the weight limit is reached.
fn steps(version: StorageVersion, cursor: &[u8], weight_left: &mut Weight) -> StepResult;
/// Verify that the migration step fits into `Cursor`, and that `max_step_weight` is not greater
/// than `max_block_weight`.
fn integrity_test(max_block_weight: Weight);
/// Returns whether migrating from `in_storage` to `target` is supported.
///
/// A migration is supported if (in_storage + 1, target) is contained by `VERSION_RANGE`.
fn is_upgrade_supported(in_storage: StorageVersion, target: StorageVersion) -> bool {
// Already at the target version: trivially supported (nothing to do).
if in_storage == target {
return true
}
// Downgrades are never supported.
if in_storage > target {
return false
}
let (low, high) = Self::VERSION_RANGE;
// The first migration in the sequence upgrades `low - 1` -> `low`;
// `low == 0` would underflow and means no version is upgradable from.
let Some(first_supported) = low.checked_sub(1) else {
return false
};
// Supported iff the stored version is covered by the sequence and the
// target is exactly the last version the sequence produces.
in_storage >= first_supported && target == high
}
}
/// Performs all necessary migrations based on `StorageVersion`.
pub struct Migration<T: Config>(PhantomData<T>);
impl<T: Config> OnRuntimeUpgrade for Migration<T> {
fn on_runtime_upgrade() -> Weight {
let version = <Pallet<T>>::on_chain_storage_version();
#[cfg(not(any(feature = "runtime-benchmarks", test)))]
pub struct Migration<T: Config, M: MigrateSequence = Migrations<T>>(PhantomData<(T, M)>);
/// Custom migration for running runtime-benchmarks and tests.
#[cfg(any(feature = "runtime-benchmarks", test))]
pub struct Migration<T: Config, M: MigrateSequence = (NoopMigration<1>, NoopMigration<2>)>(
PhantomData<(T, M)>,
);
#[cfg(feature = "try-runtime")]
impl<T: Config, M: MigrateSequence> Migration<T, M> {
fn run_all_steps() -> Result<(), TryRuntimeError> {
let mut weight = Weight::zero();
if version < 4 {
v4::migrate::<T>(&mut weight);
let name = <Pallet<T>>::name();
loop {
let in_progress_version = <Pallet<T>>::on_chain_storage_version() + 1;
let state = M::pre_upgrade_step(in_progress_version)?;
let (status, w) = Self::migrate(Weight::MAX);
weight.saturating_accrue(w);
log::info!(
target: LOG_TARGET,
"{name}: Migration step {:?} weight = {}",
in_progress_version,
weight
);
M::post_upgrade_step(in_progress_version, state)?;
if matches!(status, MigrateResult::Completed) {
break
}
}
if version < 5 {
v5::migrate::<T>(&mut weight);
let name = <Pallet<T>>::name();
log::info!(target: LOG_TARGET, "{name}: Migration steps weight = {}", weight);
Ok(())
}
}
impl<T: Config, M: MigrateSequence> OnRuntimeUpgrade for Migration<T, M> {
fn on_runtime_upgrade() -> Weight {
let name = <Pallet<T>>::name();
let latest_version = <Pallet<T>>::current_storage_version();
let storage_version = <Pallet<T>>::on_chain_storage_version();
if storage_version == latest_version {
log::warn!(
target: LOG_TARGET,
"{name}: No Migration performed storage_version = latest_version = {:?}",
&storage_version
);
return T::WeightInfo::on_runtime_upgrade_noop()
}
if version < 6 {
v6::migrate::<T>(&mut weight);
// In case a migration is already in progress we create the next migration
// (if any) right when the current one finishes.
if Self::in_progress() {
log::warn!(
target: LOG_TARGET,
"{name}: Migration already in progress {:?}",
&storage_version
);
return T::WeightInfo::on_runtime_upgrade_in_progress()
}
if version < 7 {
v7::migrate::<T>(&mut weight);
}
log::info!(
target: LOG_TARGET,
"{name}: Upgrading storage from {storage_version:?} to {latest_version:?}.",
);
if version < 8 {
v8::migrate::<T>(&mut weight);
}
let cursor = M::new(storage_version + 1);
MigrationInProgress::<T>::set(Some(cursor));
if version < 9 {
v9::migrate::<T>(&mut weight);
}
#[cfg(feature = "try-runtime")]
Self::run_all_steps().unwrap();
StorageVersion::new(9).put::<Pallet<T>>();
weight.saturating_accrue(T::DbWeight::get().writes(1));
weight
return T::WeightInfo::on_runtime_upgrade()
}
#[cfg(feature = "try-runtime")]
fn pre_upgrade() -> Result<Vec<u8>, TryRuntimeError> {
let version = <Pallet<T>>::on_chain_storage_version();
// We can't really do much here as our migrations do not happen during the runtime upgrade.
// Instead, we call the migrations `pre_upgrade` and `post_upgrade` hooks when we iterate
// over our migrations.
let storage_version = <Pallet<T>>::on_chain_storage_version();
let target_version = <Pallet<T>>::current_storage_version();
if version == 7 {
v8::pre_upgrade::<T>()?;
log::debug!(
target: LOG_TARGET,
"{}: Range supported {:?}, range requested {:?}",
<Pallet<T>>::name(),
M::VERSION_RANGE,
(storage_version, target_version)
);
ensure!(M::is_upgrade_supported(storage_version, target_version), "Unsupported upgrade");
Ok(Default::default())
}
}
/// The result of running the migration.
#[derive(Debug, PartialEq)]
pub enum MigrateResult {
/// No migration was performed
NoMigrationPerformed,
/// No migration currently in progress
NoMigrationInProgress,
/// A migration is in progress
InProgress { steps_done: u32 },
/// All migrations are completed
Completed,
}
/// The result of running a migration step.
#[derive(Debug, PartialEq)]
pub enum StepResult {
/// The weight limit was reached before finishing; `cursor` holds the
/// serialized state to resume from, `steps_done` counts executed steps.
InProgress { cursor: Cursor, steps_done: u32 },
/// The current migration finished within the weight limit.
Completed { steps_done: u32 },
}
impl<T: Config, M: MigrateSequence> Migration<T, M> {
/// Verify that each migration's step of the MigrateSequence fits into `Cursor`.
pub(crate) fn integrity_test() {
let max_weight = <T as frame_system::Config>::BlockWeights::get().max_block;
M::integrity_test(max_weight)
}
/// Migrate
/// Return the weight used and whether or not a migration is in progress
pub(crate) fn migrate(weight_limit: Weight) -> (MigrateResult, Weight) {
let name = <Pallet<T>>::name();
let mut weight_left = weight_limit;
if weight_left.checked_reduce(T::WeightInfo::migrate()).is_none() {
return (MigrateResult::NoMigrationPerformed, Weight::zero())
}
Ok(version.encode())
}
MigrationInProgress::<T>::mutate_exists(|progress| {
let Some(cursor_before) = progress.as_mut() else {
return (MigrateResult::NoMigrationInProgress, T::WeightInfo::migration_noop())
};
#[cfg(feature = "try-runtime")]
fn post_upgrade(state: Vec<u8>) -> Result<(), TryRuntimeError> {
let version = Decode::decode(&mut state.as_ref()).map_err(|_| "Cannot decode version")?;
post_checks::post_upgrade::<T>(version)
}
}
// if a migration is running it is always upgrading to the next version
let storage_version = <Pallet<T>>::on_chain_storage_version();
let in_progress_version = storage_version + 1;
/// V4: `Schedule` is changed to be a config item rather than an in-storage value.
mod v4 {
use super::*;
log::info!(
target: LOG_TARGET,
"{name}: Migrating from {:?} to {:?},",
storage_version,
in_progress_version,
);
pub fn migrate<T: Config>(weight: &mut Weight) {
#[allow(deprecated)]
migration::remove_storage_prefix(<Pallet<T>>::name().as_bytes(), b"CurrentSchedule", b"");
weight.saturating_accrue(T::DbWeight::get().writes(1));
}
}
let result =
match M::steps(in_progress_version, cursor_before.as_ref(), &mut weight_left) {
StepResult::InProgress { cursor, steps_done } => {
*progress = Some(cursor);
MigrateResult::InProgress { steps_done }
},
StepResult::Completed { steps_done } => {
in_progress_version.put::<Pallet<T>>();
if <Pallet<T>>::current_storage_version() != in_progress_version {
log::info!(
target: LOG_TARGET,
"{name}: Next migration is {:?},",
in_progress_version + 1
);
*progress = Some(M::new(in_progress_version + 1));
MigrateResult::InProgress { steps_done }
} else {
log::info!(
target: LOG_TARGET,
"{name}: All migrations done. At version {:?},",
in_progress_version
);
*progress = None;
MigrateResult::Completed
}
},
};
/// V5: State rent is removed which obsoletes some fields in `ContractInfo`.
mod v5 {
use super::*;
type AliveContractInfo<T> =
RawAliveContractInfo<CodeHash<T>, BalanceOf<T>, <T as frame_system::Config>::BlockNumber>;
type TombstoneContractInfo<T> = RawTombstoneContractInfo<
<T as frame_system::Config>::Hash,
<T as frame_system::Config>::Hashing,
>;
#[derive(Decode)]
enum OldContractInfo<T: Config> {
Alive(AliveContractInfo<T>),
Tombstone(TombstoneContractInfo<T>),
}
#[derive(Decode)]
struct RawAliveContractInfo<CodeHash, Balance, BlockNumber> {
trie_id: TrieId,
_storage_size: u32,
_pair_count: u32,
code_hash: CodeHash,
_rent_allowance: Balance,
_rent_paid: Balance,
_deduct_block: BlockNumber,
_last_write: Option<BlockNumber>,
_reserved: Option<()>,
}
#[derive(Decode)]
struct RawTombstoneContractInfo<H, Hasher>(H, PhantomData<Hasher>);
#[derive(Decode)]
struct OldDeletedContract {
_pair_count: u32,
trie_id: TrieId,
}
pub type ContractInfo<T> = RawContractInfo<CodeHash<T>>;
#[derive(Encode, Decode)]
pub struct RawContractInfo<CodeHash> {
pub trie_id: TrieId,
pub code_hash: CodeHash,
pub _reserved: Option<()>,
}
#[derive(Encode, Decode)]
struct DeletedContract {
trie_id: TrieId,
}
#[storage_alias]
type ContractInfoOf<T: Config> = StorageMap<
Pallet<T>,
Twox64Concat,
<T as frame_system::Config>::AccountId,
ContractInfo<T>,
>;
#[storage_alias]
type DeletionQueue<T: Config> = StorageValue<Pallet<T>, Vec<DeletedContract>>;
pub fn migrate<T: Config>(weight: &mut Weight) {
<ContractInfoOf<T>>::translate(|_key, old: OldContractInfo<T>| {
weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1));
match old {
OldContractInfo::Alive(old) => Some(ContractInfo::<T> {
trie_id: old.trie_id,
code_hash: old.code_hash,
_reserved: old._reserved,
}),
OldContractInfo::Tombstone(_) => None,
}
});
DeletionQueue::<T>::translate(|old: Option<Vec<OldDeletedContract>>| {
weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1));
old.map(|old| old.into_iter().map(|o| DeletedContract { trie_id: o.trie_id }).collect())
(result, weight_limit.saturating_sub(weight_left))
})
.ok();
}
pub(crate) fn ensure_migrated() -> DispatchResult {
if Self::in_progress() {
Err(Error::<T>::MigrationInProgress.into())
} else {
Ok(())
}
}
pub(crate) fn in_progress() -> bool {
MigrationInProgress::<T>::exists()
}
}
/// V6: Added storage deposits
mod v6 {
use super::*;
#[impl_trait_for_tuples::impl_for_tuples(10)]
#[tuple_types_custom_trait_bound(Migrate)]
impl MigrateSequence for Tuple {
const VERSION_RANGE: (u16, u16) = {
let mut versions: (u16, u16) = (0, 0);
for_tuples!(
#(
match versions {
(0, 0) => {
versions = (Tuple::VERSION, Tuple::VERSION);
},
(min_version, last_version) if Tuple::VERSION == last_version + 1 => {
versions = (min_version, Tuple::VERSION);
},
_ => panic!("Migrations must be ordered by their versions with no gaps.")
}
)*
);
versions
};
#[derive(Encode, Decode)]
struct OldPrefabWasmModule {
#[codec(compact)]
instruction_weights_version: u32,
#[codec(compact)]
initial: u32,
#[codec(compact)]
maximum: u32,
#[codec(compact)]
refcount: u64,
_reserved: Option<()>,
code: Vec<u8>,
original_code_len: u32,
}
#[derive(Encode, Decode)]
pub struct PrefabWasmModule {
#[codec(compact)]
pub instruction_weights_version: u32,
#[codec(compact)]
pub initial: u32,
#[codec(compact)]
pub maximum: u32,
pub code: Vec<u8>,
}
use v5::ContractInfo as OldContractInfo;
#[derive(Encode, Decode)]
pub struct RawContractInfo<CodeHash, Balance> {
pub trie_id: TrieId,
pub code_hash: CodeHash,
pub storage_deposit: Balance,
}
#[derive(Encode, Decode)]
pub struct OwnerInfo<T: Config> {
owner: T::AccountId,
#[codec(compact)]
deposit: BalanceOf<T>,
#[codec(compact)]
refcount: u64,
}
pub type ContractInfo<T> = RawContractInfo<CodeHash<T>, BalanceOf<T>>;
#[storage_alias]
type ContractInfoOf<T: Config> = StorageMap<
Pallet<T>,
Twox64Concat,
<T as frame_system::Config>::AccountId,
ContractInfo<T>,
>;
#[storage_alias]
type CodeStorage<T: Config> = StorageMap<Pallet<T>, Identity, CodeHash<T>, PrefabWasmModule>;
#[storage_alias]
type OwnerInfoOf<T: Config> = StorageMap<Pallet<T>, Identity, CodeHash<T>, OwnerInfo<T>>;
pub fn migrate<T: Config>(weight: &mut Weight) {
<ContractInfoOf<T>>::translate(|_key, old: OldContractInfo<T>| {
weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1));
Some(ContractInfo::<T> {
trie_id: old.trie_id,
code_hash: old.code_hash,
storage_deposit: Default::default(),
})
});
let nobody = T::AccountId::decode(&mut sp_runtime::traits::TrailingZeroInput::zeroes())
.expect("Infinite input; no dead input space; qed");
<CodeStorage<T>>::translate(|key, old: OldPrefabWasmModule| {
weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2));
<OwnerInfoOf<T>>::insert(
key,
OwnerInfo {
refcount: old.refcount,
owner: nobody.clone(),
deposit: Default::default(),
},
);
Some(PrefabWasmModule {
instruction_weights_version: old.instruction_weights_version,
initial: old.initial,
maximum: old.maximum,
code: old.code,
})
});
}
}
/// Rename `AccountCounter` to `Nonce`.
mod v7 {
use super::*;
pub fn migrate<T: Config>(weight: &mut Weight) {
#[storage_alias]
type AccountCounter<T: Config> = StorageValue<Pallet<T>, u64, ValueQuery>;
#[storage_alias]
type Nonce<T: Config> = StorageValue<Pallet<T>, u64, ValueQuery>;
Nonce::<T>::set(AccountCounter::<T>::take());
weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2))
}
}
/// Update `ContractInfo` with new fields that track storage deposits.
mod v8 {
use super::*;
use sp_io::default_child_storage as child;
use v6::ContractInfo as OldContractInfo;
// v8 contract info layout: storage-usage counters plus a breakdown of the
// deposit into byte/item/base components (computed by `migrate`).
#[derive(Encode, Decode)]
pub struct ContractInfo<T: Config> {
// Id of the contract's child storage trie.
pub trie_id: TrieId,
// Hash of the code executed by this contract.
pub code_hash: CodeHash<T>,
// Total bytes found in the contract's child trie during migration.
pub storage_bytes: u32,
// Number of items found in the contract's child trie during migration.
pub storage_items: u32,
// Portion of the old deposit attributed to stored bytes.
pub storage_byte_deposit: BalanceOf<T>,
// Portion of the old deposit attributed to stored items.
pub storage_item_deposit: BalanceOf<T>,
// Remainder of the old deposit after subtracting byte and item portions.
pub storage_base_deposit: BalanceOf<T>,
}
// Contract info map, generic over the value type so the same alias serves
// both the old and new layouts.
#[storage_alias]
type ContractInfoOf<T: Config, V> =
StorageMap<Pallet<T>, Twox64Concat, <T as frame_system::Config>::AccountId, V>;
pub fn migrate<T: Config>(weight: &mut Weight) {
<ContractInfoOf<T, ContractInfo<T>>>::translate_values(|old: OldContractInfo<T>| {
// Count storage items of this contract
let mut storage_bytes = 0u32;
let mut storage_items = 0u32;
let mut key = Vec::new();
while let Some(next) = child::next_key(&old.trie_id, &key) {
key = next;
let mut val_out = [];
let len = child::read(&old.trie_id, &key, &mut val_out, 0)
.expect("The loop conditions checks for existence of the key; qed");
storage_bytes.saturating_accrue(len);
storage_items.saturating_accrue(1);
}
let storage_byte_deposit =
T::DepositPerByte::get().saturating_mul(storage_bytes.into());
let storage_item_deposit =
T::DepositPerItem::get().saturating_mul(storage_items.into());
let storage_base_deposit = old
.storage_deposit
.saturating_sub(storage_byte_deposit)
.saturating_sub(storage_item_deposit);
// Reads: One read for each storage item plus the contract info itself.
// Writes: Only the new contract info.
weight.saturating_accrue(
T::DbWeight::get().reads_writes(u64::from(storage_items) + 1, 1),
);
Some(ContractInfo {
trie_id: old.trie_id,
code_hash: old.code_hash,
storage_bytes,
storage_items,
storage_byte_deposit,
storage_item_deposit,
storage_base_deposit,
})
});
// Build a fresh cursor for the migration step whose `VERSION` matches
// `version` by SCALE-encoding that step's default state.
// Falls through to `invalid_version` (which diverges) when no tuple element
// declares `version`.
fn new(version: StorageVersion) -> Cursor {
for_tuples!(
#(
if version == Tuple::VERSION {
return Tuple::default().encode().try_into().expect(PROOF_ENCODE)
}
)*
);
invalid_version(version)
}
#[cfg(feature = "try-runtime")]
pub fn pre_upgrade<T: Config>() -> Result<(), TryRuntimeError> {
use frame_support::traits::ReservableCurrency;
for (key, value) in ContractInfoOf::<T, OldContractInfo<T>>::iter() {
let reserved = T::Currency::reserved_balance(&key);
ensure!(reserved >= value.storage_deposit, "Reserved balance out of sync.");
}
Ok(())
/// Execute the pre-checks of the step associated with this version.
// Dispatches to the tuple element whose `VERSION` matches `version`;
// diverges via `invalid_version` when no step handles it.
fn pre_upgrade_step(version: StorageVersion) -> Result<Vec<u8>, TryRuntimeError> {
for_tuples!(
#(
if version == Tuple::VERSION {
return Tuple::pre_upgrade_step()
}
)*
);
invalid_version(version)
}
#[cfg(feature = "try-runtime")]
/// Execute the post-checks of the step associated with this version.
// `state` is the blob previously returned by the matching `pre_upgrade_step`.
fn post_upgrade_step(version: StorageVersion, state: Vec<u8>) -> Result<(), TryRuntimeError> {
for_tuples!(
#(
if version == Tuple::VERSION {
return Tuple::post_upgrade_step(state)
}
)*
);
invalid_version(version)
}
// Run as many steps of the migration for `version` as `weight_left` allows.
// The in-progress state is decoded from `cursor`, stepped repeatedly, and
// either reported complete or re-encoded into a new cursor.
fn steps(version: StorageVersion, mut cursor: &[u8], weight_left: &mut Weight) -> StepResult {
for_tuples!(
#(
if version == Tuple::VERSION {
let mut migration = <Tuple as Decode>::decode(&mut cursor)
.expect(PROOF_DECODE);
let max_weight = Tuple::max_step_weight();
let mut steps_done = 0;
// Only step while *all* remaining weight components strictly
// exceed the worst-case step weight, so a single step can
// never overshoot the budget.
while weight_left.all_gt(max_weight) {
let (finished, weight) = migration.step();
steps_done += 1;
weight_left.saturating_reduce(weight);
if matches!(finished, IsFinished::Yes) {
return StepResult::Completed{ steps_done }
}
}
return StepResult::InProgress{cursor: migration.encode().try_into().expect(PROOF_ENCODE), steps_done }
}
)*
);
invalid_version(version)
}
// Run every tuple element's `integrity_test`, letting each step assert that
// its declared `max_step_weight` fits into `max_block_weight`.
fn integrity_test(max_block_weight: Weight) {
for_tuples!(
#(
Tuple::integrity_test(max_block_weight);
)*
);
}
}
/// Update `CodeStorage` with the new `determinism` field.
mod v9 {
#[cfg(test)]
mod test {
use super::*;
use crate::Determinism;
use v6::PrefabWasmModule as OldPrefabWasmModule;
use crate::tests::{ExtBuilder, Test};
#[derive(Encode, Decode)]
pub struct PrefabWasmModule {
#[codec(compact)]
pub instruction_weights_version: u32,
#[codec(compact)]
pub initial: u32,
#[codec(compact)]
pub maximum: u32,
pub code: Vec<u8>,
pub determinism: Determinism,
// Test double: a migration registered at storage version `N` that needs
// exactly `N` calls to `step` before reporting completion.
#[derive(Default, Encode, Decode, MaxEncodedLen)]
struct MockMigration<const N: u16> {
// MockMigration<N> needs `N` steps to finish
count: u16,
}
#[storage_alias]
type CodeStorage<T: Config> = StorageMap<Pallet<T>, Identity, CodeHash<T>, PrefabWasmModule>;
// `MockMigration<N>` acts as the migration for storage version `N`: every
// call to `step` advances an internal counter, the migration reports
// `IsFinished::Yes` exactly on the `N`-th call, and every step costs one
// unit of weight in all components.
impl<const N: u16> Migrate for MockMigration<N> {
const VERSION: u16 = N;
fn max_step_weight() -> Weight {
Weight::from_all(1)
}
fn step(&mut self) -> (IsFinished, Weight) {
// Stepping an already-finished migration is a caller bug.
assert!(self.count != N);
self.count += 1;
let finished = if self.count == N { IsFinished::Yes } else { IsFinished::No };
(finished, Weight::from_all(1))
}
}
pub fn migrate<T: Config>(weight: &mut Weight) {
<CodeStorage<T>>::translate_values(|old: OldPrefabWasmModule| {
weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1));
Some(PrefabWasmModule {
instruction_weights_version: old.instruction_weights_version,
initial: old.initial,
maximum: old.maximum,
code: old.code,
determinism: Determinism::Enforced,
})
#[test]
// `VERSION_RANGE` spans from the first tuple element's version to the last's.
fn version_range_works() {
let range = <(MockMigration<1>, MockMigration<2>)>::VERSION_RANGE;
assert_eq!(range, (1, 2));
}
#[test]
// Exercises which `(from, to)` version pairs the tuple accepts: equal
// versions (nothing to do), or upgrades starting no earlier than one below
// the tuple's first version and targeting exactly its last version.
fn is_upgrade_supported_works() {
type M = (MockMigration<9>, MockMigration<10>, MockMigration<11>);
// All of these must be accepted.
[(1, 1), (8, 11), (9, 11)].into_iter().for_each(|(from, to)| {
assert!(
M::is_upgrade_supported(StorageVersion::new(from), StorageVersion::new(to)),
"{} -> {} is supported",
from,
to
)
});
// Downgrades, gaps before the range, and targets short of the latest
// version must all be rejected.
[(1, 0), (0, 3), (7, 11), (8, 10)].into_iter().for_each(|(from, to)| {
assert!(
!M::is_upgrade_supported(StorageVersion::new(from), StorageVersion::new(to)),
"{} -> {} is not supported",
from,
to
)
});
}
#[test]
// With a 2-unit budget and 1-unit steps only one step fits per call (the
// loop requires remaining weight strictly greater than the step's max), so
// the first call leaves MockMigration<2> in progress and the second
// completes it.
fn steps_works() {
type M = (MockMigration<2>, MockMigration<3>);
let version = StorageVersion::new(2);
let mut cursor = M::new(version);
let mut weight = Weight::from_all(2);
let result = M::steps(version, &cursor, &mut weight);
// Expected cursor after one step: MockMigration<2> with `count == 1`,
// SCALE-encoded as the two bytes [1, 0].
cursor = vec![1u8, 0].try_into().unwrap();
assert_eq!(result, StepResult::InProgress { cursor: cursor.clone(), steps_done: 1 });
assert_eq!(weight, Weight::from_all(1));
let mut weight = Weight::from_all(2);
assert_eq!(
M::steps(version, &cursor, &mut weight),
StepResult::Completed { steps_done: 1 }
);
}
#[test]
// When the on-chain version already equals the latest supported version,
// `migrate` reports that no migration is in progress.
fn no_migration_in_progress_works() {
type M = (MockMigration<1>, MockMigration<2>);
type TestMigration = Migration<Test, M>;
ExtBuilder::default().build().execute_with(|| {
assert_eq!(StorageVersion::get::<Pallet<Test>>(), 2);
assert_eq!(TestMigration::migrate(Weight::MAX).0, MigrateResult::NoMigrationInProgress)
});
}
#[test]
// Full upgrade path from version 0: each `migrate` call completes one mock
// migration and bumps the on-chain storage version, until all versions are
// processed and further calls report no migration in progress.
fn migration_works() {
type M = (MockMigration<1>, MockMigration<2>);
type TestMigration = Migration<Test, M>;
ExtBuilder::default().set_storage_version(0).build().execute_with(|| {
assert_eq!(StorageVersion::get::<Pallet<Test>>(), 0);
TestMigration::on_runtime_upgrade();
for (version, status) in
[(1, MigrateResult::InProgress { steps_done: 1 }), (2, MigrateResult::Completed)]
{
assert_eq!(TestMigration::migrate(Weight::MAX).0, status);
assert_eq!(
<Pallet<Test>>::on_chain_storage_version(),
StorageVersion::new(version)
);
}
assert_eq!(TestMigration::migrate(Weight::MAX).0, MigrateResult::NoMigrationInProgress);
assert_eq!(StorageVersion::get::<Pallet<Test>>(), 2);
});
}
}
// Post checks always need to be run against the latest storage version. This is why we
// do not scope them in the per version modules. They always need to be ported to the latest
// version.
#[cfg(feature = "try-runtime")]
mod post_checks {
use super::*;
use crate::Determinism;
use sp_io::default_child_storage as child;
use v8::ContractInfo;
use v9::PrefabWasmModule;
#[storage_alias]
type CodeStorage<T: Config> = StorageMap<Pallet<T>, Identity, CodeHash<T>, PrefabWasmModule>;
#[storage_alias]
type ContractInfoOf<T: Config, V> =
StorageMap<Pallet<T>, Twox64Concat, <T as frame_system::Config>::AccountId, V>;
/// Run the post-upgrade sanity checks applicable to an upgrade that started
/// at `old_version`. Upgrades starting before v7 are skipped entirely.
pub fn post_upgrade<T: Config>(old_version: StorageVersion) -> Result<(), TryRuntimeError> {
if old_version >= 7 {
// Verify v8 invariants only when this upgrade actually crossed v8.
if old_version < 8 {
v8::<T>()?;
}
// Likewise for v9.
if old_version < 9 {
v9::<T>()?;
}
}
Ok(())
}
// Post-check for the v8 migration: for every migrated contract, verify that
// the account's reserved balance covers the recorded deposits and that the
// recorded storage counters match an actual walk of the child trie.
fn v8<T: Config>() -> Result<(), TryRuntimeError> {
use frame_support::traits::ReservableCurrency;
for (key, value) in ContractInfoOf::<T, ContractInfo<T>>::iter() {
let reserved = T::Currency::reserved_balance(&key);
// Sum of the three deposit components stored in the contract info.
let stored = value
.storage_base_deposit
.saturating_add(value.storage_byte_deposit)
.saturating_add(value.storage_item_deposit);
ensure!(reserved >= stored, "Reserved balance out of sync.");
// Re-count the contract's child-trie contents from scratch.
let mut storage_bytes = 0u32;
let mut storage_items = 0u32;
let mut key = Vec::new();
while let Some(next) = child::next_key(&value.trie_id, &key) {
key = next;
// Empty output buffer: we only need the length, not the value.
let mut val_out = [];
let len = child::read(&value.trie_id, &key, &mut val_out, 0)
.expect("The loop conditions checks for existence of the key; qed");
storage_bytes.saturating_accrue(len);
storage_items.saturating_accrue(1);
}
ensure!(storage_bytes == value.storage_bytes, "Storage bytes do not match.");
ensure!(storage_items == value.storage_items, "Storage items do not match.");
}
Ok(())
}
// Post-check for the v9 migration: every stored code must have been marked
// `Determinism::Enforced`, since that is what the migration writes for all
// pre-existing entries.
fn v9<T: Config>() -> Result<(), TryRuntimeError> {
for value in CodeStorage::<T>::iter_values() {
ensure!(
value.determinism == Determinism::Enforced,
"All pre-existing codes need to be deterministic."
);
}
Ok(())
}
}
@@ -0,0 +1,272 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Don't rely on reserved balances keeping an account alive
//! See <https://github.com/paritytech/substrate/pull/13370>.
use crate::{
address::AddressGenerator,
exec::AccountIdOf,
migration::{IsFinished, Migrate},
weights::WeightInfo,
BalanceOf, CodeHash, Config, Pallet, TrieId, Weight, LOG_TARGET,
};
use codec::{Decode, Encode};
use core::cmp::{max, min};
use frame_support::{
codec,
pallet_prelude::*,
storage_alias,
traits::{
fungible::Inspect,
tokens::{Fortitude::Polite, Preservation::Preserve},
Currency, ExistenceRequirement, ReservableCurrency,
},
DefaultNoBound,
};
use sp_core::hexdisplay::HexDisplay;
#[cfg(feature = "try-runtime")]
use sp_runtime::TryRuntimeError;
use sp_runtime::{traits::Zero, Perbill, Saturating};
use sp_std::{marker::PhantomData, ops::Deref, prelude::*};
// Pre-v10 storage layout, read by `Migration::step`.
mod old {
use super::*;
// Contract info as written by the v8 migration (no deposit account yet).
#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
#[scale_info(skip_type_params(T))]
pub struct ContractInfo<T: Config> {
pub trie_id: TrieId,
pub code_hash: CodeHash<T>,
pub storage_bytes: u32,
pub storage_items: u32,
pub storage_byte_deposit: BalanceOf<T>,
pub storage_item_deposit: BalanceOf<T>,
pub storage_base_deposit: BalanceOf<T>,
}
// Pre-v10 contract info map.
#[storage_alias]
pub type ContractInfoOf<T: Config> = StorageMap<
Pallet<T>,
Twox64Concat,
<T as frame_system::Config>::AccountId,
ContractInfo<T>,
>;
}
#[cfg(feature = "runtime-benchmarks")]
// Seed an old-layout contract info entry so benchmarks can exercise this
// migration. Counters and deposits are irrelevant for that purpose and are
// defaulted.
// NOTE(review): "contrat" in the name is a typo, but renaming the public
// function would break external callers, so it is kept as-is.
pub fn store_old_contrat_info<T: Config>(account: T::AccountId, info: crate::ContractInfo<T>) {
let info = old::ContractInfo {
trie_id: info.trie_id,
code_hash: info.code_hash,
storage_bytes: Default::default(),
storage_items: Default::default(),
storage_byte_deposit: Default::default(),
storage_item_deposit: Default::default(),
storage_base_deposit: Default::default(),
};
old::ContractInfoOf::<T>::insert(account, info);
}
// Strongly-typed wrapper around the account that holds a contract's storage
// deposit after this migration.
#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebugNoBound, TypeInfo, MaxEncodedLen)]
#[scale_info(skip_type_params(T))]
pub struct DepositAccount<T: Config>(AccountIdOf<T>);
// Let a `DepositAccount` be used wherever an `&AccountId` is expected.
impl<T: Config> Deref for DepositAccount<T> {
type Target = AccountIdOf<T>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
// v10 contract info layout: same as v8 plus the dedicated `deposit_account`
// that now holds the storage deposit.
#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)]
#[scale_info(skip_type_params(T))]
pub struct ContractInfo<T: Config> {
pub trie_id: TrieId,
// Account holding this contract's storage deposit (new in v10).
deposit_account: DepositAccount<T>,
pub code_hash: CodeHash<T>,
storage_bytes: u32,
storage_items: u32,
pub storage_byte_deposit: BalanceOf<T>,
storage_item_deposit: BalanceOf<T>,
storage_base_deposit: BalanceOf<T>,
}
// Cursor state of the v10 migration: remembers the raw key of the last
// migrated contract so the next step can resume iteration after it.
#[derive(Encode, Decode, MaxEncodedLen, DefaultNoBound)]
pub struct Migration<T: Config> {
// `None` means iteration starts from the beginning of the old map.
last_key: Option<BoundedVec<u8, ConstU32<256>>>,
_phantom: PhantomData<T>,
}
// Post-v10 contract info map written by `Migration::step`.
#[storage_alias]
type ContractInfoOf<T: Config> =
StorageMap<Pallet<T>, Twox64Concat, <T as frame_system::Config>::AccountId, ContractInfo<T>>;
impl<T: Config> Migrate for Migration<T> {
const VERSION: u16 = 10;
fn max_step_weight() -> Weight {
T::WeightInfo::v10_migration_step()
}
// Migrate one contract per step: unreserve its old storage deposit from the
// contract account, move it onto the new dedicated deposit account, and
// rescale the recorded byte/item/base deposit breakdown to the amount that
// actually arrived there.
fn step(&mut self) -> (IsFinished, Weight) {
let mut iter = if let Some(last_key) = self.last_key.take() {
old::ContractInfoOf::<T>::iter_from(last_key.to_vec())
} else {
old::ContractInfoOf::<T>::iter()
};
if let Some((account, contract)) = iter.next() {
let min_balance = Pallet::<T>::min_balance();
log::debug!(target: LOG_TARGET, "Account: 0x{} ", HexDisplay::from(&account.encode()));
// Store last key for next migration step
self.last_key = Some(iter.last_raw_key().to_vec().try_into().unwrap());
// Get the new deposit account address
let deposit_account: DepositAccount<T> =
DepositAccount(T::AddressGenerator::deposit_address(&account));
// Calculate the existing deposit, that should be reserved on the contract account
let old_deposit = contract
.storage_base_deposit
.saturating_add(contract.storage_item_deposit)
.saturating_add(contract.storage_byte_deposit);
// Unreserve the existing deposit
// Note we can't use repatriate_reserve, because it only works with existing accounts
let remaining = T::Currency::unreserve(&account, old_deposit);
if !remaining.is_zero() {
log::warn!(
target: LOG_TARGET,
"Partially unreserved. Remaining {:?} out of {:?} asked",
remaining,
old_deposit
);
}
// Attempt to transfer the old deposit to the deposit account.
// Capped so the contract account keeps at least its minimum balance.
let amount = old_deposit
.saturating_sub(min_balance)
.min(T::Currency::reducible_balance(&account, Preserve, Polite));
let new_deposit = T::Currency::transfer(
&account,
&deposit_account,
amount,
ExistenceRequirement::KeepAlive,
)
.map(|_| {
log::debug!(
target: LOG_TARGET,
"Transferred deposit ({:?}) to deposit account",
amount
);
amount
})
// If it fails we fallback to minting the ED.
.unwrap_or_else(|err| {
log::error!(target: LOG_TARGET, "Failed to transfer ED, reason: {:?}", err);
T::Currency::deposit_creating(&deposit_account, min_balance);
min_balance
});
// Calculate the new base_deposit to store in the contract:
// Ideally: it should be the same as the old one
// Ideally, it should be at least 2xED (for the contract and deposit account).
// It can't be more than the `new_deposit`.
let new_base_deposit = min(
max(contract.storage_base_deposit, min_balance.saturating_add(min_balance)),
new_deposit,
);
// Calculate the ratio to adjust storage_byte and storage_item deposits.
let new_deposit_without_base = new_deposit.saturating_sub(new_base_deposit);
let old_deposit_without_base =
old_deposit.saturating_sub(contract.storage_base_deposit);
let ratio = Perbill::from_rational(new_deposit_without_base, old_deposit_without_base);
// Calculate the new storage deposits based on the ratio
let storage_byte_deposit = ratio.mul_ceil(contract.storage_byte_deposit);
let storage_item_deposit = ratio.mul_ceil(contract.storage_item_deposit);
// Recalculate the new base deposit, instead of using new_base_deposit to avoid rounding
// errors
let storage_base_deposit = new_deposit
.saturating_sub(storage_byte_deposit)
.saturating_sub(storage_item_deposit);
let new_contract_info = ContractInfo {
trie_id: contract.trie_id,
deposit_account,
code_hash: contract.code_hash,
storage_bytes: contract.storage_bytes,
storage_items: contract.storage_items,
storage_byte_deposit,
storage_item_deposit,
storage_base_deposit,
};
ContractInfoOf::<T>::insert(&account, new_contract_info);
(IsFinished::No, T::WeightInfo::v10_migration_step())
} else {
log::debug!(target: LOG_TARGET, "Done Migrating contract info");
(IsFinished::Yes, T::WeightInfo::v10_migration_step())
}
}
#[cfg(feature = "try-runtime")]
// Record a sample of up to 10 old contract infos to validate after the upgrade.
fn pre_upgrade_step() -> Result<Vec<u8>, TryRuntimeError> {
let sample: Vec<_> = old::ContractInfoOf::<T>::iter().take(10).collect();
log::debug!(target: LOG_TARGET, "Taking sample of {} contracts", sample.len());
Ok(sample.encode())
}
#[cfg(feature = "try-runtime")]
// For every sampled contract: unchanged fields must match and the deposit
// account's total balance must equal the recorded deposit breakdown.
fn post_upgrade_step(state: Vec<u8>) -> Result<(), TryRuntimeError> {
let sample =
<Vec<(T::AccountId, old::ContractInfo<T>)> as Decode>::decode(&mut &state[..]).unwrap();
log::debug!(target: LOG_TARGET, "Validating sample of {} contracts", sample.len());
for (account, old_contract) in sample {
log::debug!(target: LOG_TARGET, "===");
log::debug!(target: LOG_TARGET, "Account: 0x{} ", HexDisplay::from(&account.encode()));
let contract = ContractInfoOf::<T>::get(&account).unwrap();
ensure!(old_contract.trie_id == contract.trie_id, "invalid trie_id");
ensure!(old_contract.code_hash == contract.code_hash, "invalid code_hash");
ensure!(old_contract.storage_bytes == contract.storage_bytes, "invalid storage_bytes");
ensure!(old_contract.storage_items == contract.storage_items, "invalid storage_items");
let deposit =
<<T as Config>::Currency as frame_support::traits::Currency<_>>::total_balance(
&contract.deposit_account,
);
ensure!(
deposit ==
contract
.storage_base_deposit
.saturating_add(contract.storage_item_deposit)
.saturating_add(contract.storage_byte_deposit),
"deposit mismatch"
);
}
Ok(())
}
}
@@ -0,0 +1,130 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Overflowing bounded DeletionQueue.
//! See <https://github.com/paritytech/substrate/pull/13702>.
use crate::{
migration::{IsFinished, Migrate},
weights::WeightInfo,
Config, Pallet, TrieId, Weight, LOG_TARGET,
};
#[cfg(feature = "try-runtime")]
use sp_runtime::TryRuntimeError;
use codec::{Decode, Encode};
use frame_support::{codec, pallet_prelude::*, storage_alias, DefaultNoBound};
use sp_std::{marker::PhantomData, prelude::*};
// Pre-v11 storage layout, consumed by `Migration::step`.
mod old {
use super::*;
// Entry of the old deletion queue: only the child-trie id to purge.
#[derive(Encode, Decode, TypeInfo, MaxEncodedLen)]
pub struct DeletedContract {
pub(crate) trie_id: TrieId,
}
// Old deletion queue: one unbounded `Vec` stored under a single key.
#[storage_alias]
pub type DeletionQueue<T: Config> = StorageValue<Pallet<T>, Vec<DeletedContract>>;
}
// Replacement for the old `Vec`-based queue: a pair of counters delimiting
// the live window of entries in the new map-based `DeletionQueue`.
#[derive(Encode, Decode, TypeInfo, MaxEncodedLen, DefaultNoBound, Clone)]
#[scale_info(skip_type_params(T))]
pub struct DeletionQueueManager<T: Config> {
// Key under which the next deleted contract is inserted.
insert_counter: u32,
// Presumably the key of the next entry to be deleted; the migration
// leaves it at 0 — confirm against the pallet's queue consumer.
delete_counter: u32,
_phantom: PhantomData<T>,
}
#[cfg(any(feature = "runtime-benchmarks", feature = "try-runtime"))]
/// Populate the pre-v11 deletion queue with `len` placeholder entries so
/// benchmarks and try-runtime checks can exercise the migration.
pub fn fill_old_queue<T: Config>(len: usize) {
let entries = (0..len)
.map(|_| old::DeletedContract { trie_id: Default::default() })
.collect::<Vec<_>>();
old::DeletionQueue::<T>::set(Some(entries));
}
// New map-based deletion queue, keyed by insertion counter.
#[storage_alias]
type DeletionQueue<T: Config> = StorageMap<Pallet<T>, Twox64Concat, u32, TrieId>;
// Counters delimiting the live window of the new queue.
#[storage_alias]
type DeletionQueueCounter<T: Config> = StorageValue<Pallet<T>, DeletionQueueManager<T>, ValueQuery>;
// v11 migration: stateless, since the whole queue is migrated in one step.
#[derive(Encode, Decode, MaxEncodedLen, DefaultNoBound)]
pub struct Migration<T: Config> {
_phantom: PhantomData<T>,
}
impl<T: Config> Migrate for Migration<T> {
const VERSION: u16 = 11;
// It would be more correct to make our use the now removed [DeletionQueueDepth](https://github.com/paritytech/substrate/pull/13702/files#diff-70e9723e9db62816e35f6f885b6770a8449c75a6c2733e9fa7a245fe52c4656c)
// but in practice the queue is always empty, so 128 is a good enough approximation for not
// underestimating the weight of our migration.
fn max_step_weight() -> Weight {
T::WeightInfo::v11_migration_step(128)
}
// Single-shot step: drain the old `Vec`-based queue and re-insert every
// entry into the new map keyed by a monotonically increasing counter.
fn step(&mut self) -> (IsFinished, Weight) {
let Some(old_queue) = old::DeletionQueue::<T>::take() else { return (IsFinished::Yes, Weight::zero()) };
let len = old_queue.len();
log::debug!(
target: LOG_TARGET,
"Migrating deletion queue with {} deleted contracts",
old_queue.len()
);
if !old_queue.is_empty() {
let mut queue = DeletionQueueManager::<T>::default();
for contract in old_queue {
<DeletionQueue<T>>::insert(queue.insert_counter, contract.trie_id);
queue.insert_counter += 1;
}
<DeletionQueueCounter<T>>::set(queue);
}
(IsFinished::Yes, T::WeightInfo::v11_migration_step(len as u32))
}
#[cfg(feature = "try-runtime")]
// Record the old queue length; when the queue is empty, inject synthetic
// entries so the migration path is still exercised.
// NOTE(review): `take` removes the old queue here and the non-empty branch
// never re-inserts it, so in that case `step` would find nothing to
// migrate — confirm this matches the intended try-runtime flow.
fn pre_upgrade_step() -> Result<Vec<u8>, TryRuntimeError> {
let old_queue = old::DeletionQueue::<T>::take().unwrap_or_default();
if old_queue.is_empty() {
let len = 10u32;
log::debug!(
target: LOG_TARGET,
"Injecting {len} entries to deletion queue to test migration"
);
fill_old_queue::<T>(len as usize);
return Ok(len.encode())
}
Ok((old_queue.len() as u32).encode())
}
#[cfg(feature = "try-runtime")]
// `state` is the queue length recorded by `pre_upgrade_step`; the new
// counters must reflect exactly that many inserted and zero deleted entries.
fn post_upgrade_step(state: Vec<u8>) -> Result<(), TryRuntimeError> {
let len = <u32 as Decode>::decode(&mut &state[..]).unwrap();
let counter = <DeletionQueueCounter<T>>::get();
ensure!(counter.insert_counter == len, "invalid insert counter");
ensure!(counter.delete_counter == 0, "invalid delete counter");
Ok(())
}
}
@@ -0,0 +1,149 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Update `CodeStorage` with the new `determinism` field.
use crate::{
migration::{IsFinished, Migrate},
weights::WeightInfo,
CodeHash, Config, Determinism, Pallet, Weight, LOG_TARGET,
};
use codec::{Decode, Encode};
use frame_support::{
codec, pallet_prelude::*, storage_alias, BoundedVec, DefaultNoBound, Identity,
};
#[cfg(feature = "try-runtime")]
use sp_runtime::TryRuntimeError;
use sp_std::{marker::PhantomData, prelude::*};
// Pre-v9 storage layout, read by `Migration::step`.
mod old {
use super::*;
// Code metadata before v9 (no `determinism` field).
#[derive(Encode, Decode)]
pub struct PrefabWasmModule {
#[codec(compact)]
pub instruction_weights_version: u32,
#[codec(compact)]
pub initial: u32,
#[codec(compact)]
pub maximum: u32,
pub code: Vec<u8>,
}
// Pre-v9 code map.
#[storage_alias]
pub type CodeStorage<T: Config> =
StorageMap<Pallet<T>, Identity, CodeHash<T>, PrefabWasmModule>;
}
#[cfg(feature = "runtime-benchmarks")]
/// Insert a dummy pre-v9 code blob of `len` bytes, keyed by its hash, so
/// benchmarks can measure migrating a single code entry.
pub fn store_old_dummy_code<T: Config>(len: usize) {
use sp_runtime::traits::Hash;
let code = vec![42u8; len];
let hash = T::Hashing::hash(&code);
old::CodeStorage::<T>::insert(
hash,
old::PrefabWasmModule { instruction_weights_version: 0, initial: 0, maximum: 0, code },
);
}
// Post-v9 code metadata layout: the old fields plus `determinism`.
#[derive(Encode, Decode)]
struct PrefabWasmModule {
#[codec(compact)]
pub instruction_weights_version: u32,
#[codec(compact)]
pub initial: u32,
#[codec(compact)]
pub maximum: u32,
pub code: Vec<u8>,
// Set to `Determinism::Enforced` for every migrated entry.
pub determinism: Determinism,
}
// Post-v9 code map written by `Migration::step`.
#[storage_alias]
type CodeStorage<T: Config> = StorageMap<Pallet<T>, Identity, CodeHash<T>, PrefabWasmModule>;
// Cursor state of the v9 migration: raw key of the last migrated entry.
#[derive(Encode, Decode, MaxEncodedLen, DefaultNoBound)]
pub struct Migration<T: Config> {
// `None` means iteration starts from the beginning of `old::CodeStorage`.
last_key: Option<BoundedVec<u8, ConstU32<256>>>,
_phantom: PhantomData<T>,
}
impl<T: Config> Migrate for Migration<T> {
const VERSION: u16 = 9;
fn max_step_weight() -> Weight {
// Worst case: migrating a code blob of the maximum allowed length.
T::WeightInfo::v9_migration_step(T::MaxCodeLen::get())
}
/// Migrate one code entry per step, re-encoding it with the new
/// `determinism` field set to `Determinism::Enforced`. The raw key of the
/// migrated entry is kept in `self.last_key` so the next step resumes
/// right after it.
fn step(&mut self) -> (IsFinished, Weight) {
let mut iter = if let Some(last_key) = self.last_key.take() {
old::CodeStorage::<T>::iter_from(last_key.to_vec())
} else {
old::CodeStorage::<T>::iter()
};
if let Some((key, old)) = iter.next() {
log::debug!(target: LOG_TARGET, "Migrating contract code {:?}", key);
let len = old.code.len() as u32;
let module = PrefabWasmModule {
instruction_weights_version: old.instruction_weights_version,
initial: old.initial,
maximum: old.maximum,
code: old.code,
determinism: Determinism::Enforced,
};
CodeStorage::<T>::insert(key, module);
// Remember where to resume on the next step.
self.last_key = Some(iter.last_raw_key().to_vec().try_into().unwrap());
(IsFinished::No, T::WeightInfo::v9_migration_step(len))
} else {
log::debug!(target: LOG_TARGET, "No more contracts code to migrate");
(IsFinished::Yes, T::WeightInfo::v9_migration_step(0))
}
}
#[cfg(feature = "try-runtime")]
/// Record a sample of up to 100 old code entries to validate after the upgrade.
fn pre_upgrade_step() -> Result<Vec<u8>, TryRuntimeError> {
let sample: Vec<_> = old::CodeStorage::<T>::iter().take(100).collect();
log::debug!(target: LOG_TARGET, "Taking sample of {} contract codes", sample.len());
Ok(sample.encode())
}
#[cfg(feature = "try-runtime")]
/// Check every sampled entry: all old fields must be carried over unchanged
/// and `determinism` must have been set to `Enforced`.
fn post_upgrade_step(state: Vec<u8>) -> Result<(), TryRuntimeError> {
let sample =
<Vec<(CodeHash<T>, old::PrefabWasmModule)> as Decode>::decode(&mut &state[..]).unwrap();
log::debug!(target: LOG_TARGET, "Validating sample of {} contract codes", sample.len());
for (code_hash, old) in sample {
let module = CodeStorage::<T>::get(&code_hash).unwrap();
ensure!(
module.instruction_weights_version == old.instruction_weights_version,
// Fixed typo ("isntruction") in the failure message.
"invalid instruction weights version"
);
ensure!(module.determinism == Determinism::Enforced, "invalid determinism");
ensure!(module.initial == old.initial, "invalid initial");
// The `maximum` and `code` checks were previously duplicated; each
// invariant is now asserted exactly once.
ensure!(module.maximum == old.maximum, "invalid maximum");
ensure!(module.code == old.code, "invalid code");
}
Ok(())
}
}
+74 -4
View File
@@ -28,7 +28,8 @@ use crate::{
wasm::{Determinism, PrefabWasmModule, ReturnCode as RuntimeReturnCode},
weights::WeightInfo,
BalanceOf, Code, CodeStorage, CollectEvents, Config, ContractInfo, ContractInfoOf, DebugInfo,
DefaultAddressGenerator, DeletionQueueCounter, Error, Origin, Pallet, Schedule,
DefaultAddressGenerator, DeletionQueueCounter, Error, MigrationInProgress, Origin, Pallet,
Schedule,
};
use assert_matches::assert_matches;
use codec::Encode;
@@ -39,7 +40,7 @@ use frame_support::{
storage::child,
traits::{
ConstU32, ConstU64, Contains, Currency, ExistenceRequirement, LockableCurrency, OnIdle,
OnInitialize, WithdrawReasons,
OnInitialize, StorageVersion, WithdrawReasons,
},
weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight},
};
@@ -438,10 +439,11 @@ pub const GAS_LIMIT: Weight = Weight::from_parts(100_000_000_000, 3 * 1024 * 102
pub struct ExtBuilder {
existential_deposit: u64,
// Storage version to put on the contracts pallet when building the test
// externalities; `None` keeps whatever genesis sets up.
storage_version: Option<StorageVersion>,
}
impl Default for ExtBuilder {
fn default() -> Self {
Self { existential_deposit: ExistentialDeposit::get() }
Self { existential_deposit: ExistentialDeposit::get(), storage_version: None }
}
}
impl ExtBuilder {
@@ -452,6 +454,10 @@ impl ExtBuilder {
pub fn set_associated_consts(&self) {
EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit);
}
// Builder setter: pin the contracts pallet's storage version for the test.
pub fn set_storage_version(mut self, version: u16) -> Self {
self.storage_version = Some(StorageVersion::new(version));
self
}
pub fn build(self) -> sp_io::TestExternalities {
use env_logger::{Builder, Env};
let env = Env::new().default_filter_or("runtime=debug");
@@ -463,7 +469,15 @@ impl ExtBuilder {
.unwrap();
let mut ext = sp_io::TestExternalities::new(t);
ext.register_extension(KeystoreExt::new(MemoryKeystore::new()));
ext.execute_with(|| System::set_block_number(1));
ext.execute_with(|| {
use frame_support::traits::OnGenesis;
Pallet::<Test>::on_genesis();
if let Some(storage_version) = self.storage_version {
storage_version.put::<Pallet<Test>>();
}
System::set_block_number(1)
});
ext
}
}
@@ -544,6 +558,62 @@ fn calling_plain_account_fails() {
});
}
#[test]
// While a multi-block migration is in progress, every dispatchable of the
// pallet must be rejected with `Error::MigrationInProgress`.
fn migration_in_progress_works() {
let (wasm, code_hash) = compile_module::<Test>("dummy").unwrap();
ExtBuilder::default().existential_deposit(1).build().execute_with(|| {
let _ = Balances::deposit_creating(&ALICE, 1_000_000);
// Simulate an in-flight migration by setting the in-progress marker.
MigrationInProgress::<Test>::set(Some(Default::default()));
assert_err!(
Contracts::upload_code(
RuntimeOrigin::signed(ALICE),
vec![],
None,
Determinism::Enforced
),
Error::<Test>::MigrationInProgress,
);
assert_err!(
Contracts::remove_code(RuntimeOrigin::signed(ALICE), code_hash),
Error::<Test>::MigrationInProgress,
);
assert_err!(
Contracts::set_code(RuntimeOrigin::signed(ALICE), BOB.clone(), code_hash),
Error::<Test>::MigrationInProgress,
);
assert_err_ignore_postinfo!(
Contracts::call(RuntimeOrigin::signed(ALICE), BOB, 0, GAS_LIMIT, None, vec![],),
Error::<Test>::MigrationInProgress,
);
assert_err_ignore_postinfo!(
Contracts::instantiate_with_code(
RuntimeOrigin::signed(ALICE),
100_000,
GAS_LIMIT,
None,
wasm,
vec![],
vec![],
),
Error::<Test>::MigrationInProgress,
);
assert_err_ignore_postinfo!(
Contracts::instantiate(
RuntimeOrigin::signed(ALICE),
100_000,
GAS_LIMIT,
None,
code_hash,
vec![],
vec![],
),
Error::<Test>::MigrationInProgress,
);
});
}
#[test]
fn instantiate_and_call_and_deposit_event() {
let (wasm, code_hash) = compile_module::<Test>("event_and_return_on_deploy").unwrap();
+1489 -1059
View File
File diff suppressed because it is too large Load Diff