Safe and sane multi-item storage removal (#11490)

* Fix overlay prefix removal result

* Second part of the overlay prefix removal fix.

* Report only items deleted from storage in clear_prefix

* Fix kill_prefix

* Formatting

* Remove unused code

* Fixes

* Fixes

* Introduce clear_prefix host function v3

* Formatting

* Use v2 for now

* Fixes

* Formatting

* Docs

* Child prefix removal should also hide v3 for now

* Fixes

* Fixes

* Formatting

* Fixes

* apply_to_keys_while takes start_at

* apply_to_keys_while takes start_at

* apply_to_keys_while takes start_at

* Cursor API; force limits

* Use unsafe deprecated functions

* Formatting

* Fixes

* Grumbles

* Fixes

* Docs

* Some nitpicks 🙈

* Update primitives/externalities/src/lib.rs

Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>

* Formatting

* Fixes

* cargo fmt

* Fixes

* Update primitives/io/src/lib.rs

Co-authored-by: Keith Yeung <kungfukeith11@gmail.com>

* Formatting

* Fixes

Co-authored-by: Bastian Köcher <info@kchr.de>
Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>
Co-authored-by: Keith Yeung <kungfukeith11@gmail.com>
This commit is contained in:
Gavin Wood
2022-05-29 12:56:26 +01:00
committed by GitHub
parent 189a310e4c
commit ecbd65fb95
45 changed files with 968 additions and 206 deletions
+1
View File
@@ -10070,6 +10070,7 @@ dependencies = [
name = "sp-state-machine"
version = "0.12.0"
dependencies = [
"assert_matches",
"hash-db",
"hex-literal",
"log",
+2 -1
View File
@@ -393,10 +393,11 @@ impl<B: BlockT> StateBackend<HashFor<B>> for BenchmarkingState<B> {
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
) {
if let Some(ref state) = *self.state.borrow() {
state.apply_to_keys_while(child_info, prefix, f)
state.apply_to_keys_while(child_info, prefix, start_at, f)
}
}
+2 -1
View File
@@ -226,9 +226,10 @@ impl<B: BlockT> StateBackend<HashFor<B>> for RefTrackingState<B> {
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
) {
self.state.apply_to_keys_while(child_info, prefix, f)
self.state.apply_to_keys_while(child_info, prefix, start_at, f)
}
fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
+4 -2
View File
@@ -639,9 +639,10 @@ impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>> for Cachin
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
) {
self.state.apply_to_keys_while(child_info, prefix, f)
self.state.apply_to_keys_while(child_info, prefix, start_at, f)
}
fn next_storage_key(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
@@ -839,9 +840,10 @@ impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>>
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
) {
self.caching_state().apply_to_keys_while(child_info, prefix, f)
self.caching_state().apply_to_keys_while(child_info, prefix, start_at, f)
}
fn next_storage_key(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
@@ -94,7 +94,9 @@ impl<T: Config<I>, I: 'static> List<T, I> {
/// this function should generally not be used in production as it could lead to a very large
/// number of storage accesses.
pub(crate) fn unsafe_clear() {
#[allow(deprecated)]
crate::ListBags::<T, I>::remove_all(None);
#[allow(deprecated)]
crate::ListNodes::<T, I>::remove_all();
}
@@ -56,6 +56,7 @@ mod v4 {
use super::*;
pub fn migrate<T: Config>() -> Weight {
#[allow(deprecated)]
migration::remove_storage_prefix(<Pallet<T>>::name().as_bytes(), b"CurrentSchedule", b"");
T::DbWeight::get().writes(1)
}
+7 -7
View File
@@ -27,12 +27,12 @@ use crate::{
use codec::{Decode, Encode, MaxEncodedLen};
use frame_support::{
dispatch::{DispatchError, DispatchResult},
storage::child::{self, ChildInfo, KillStorageResult},
storage::child::{self, ChildInfo},
weights::Weight,
};
use scale_info::TypeInfo;
use sp_core::crypto::UncheckedFrom;
use sp_io::hashing::blake2_256;
use sp_io::{hashing::blake2_256, KillStorageResult};
use sp_runtime::{
traits::{Hash, Zero},
RuntimeDebug,
@@ -266,16 +266,16 @@ where
while !queue.is_empty() && remaining_key_budget > 0 {
// Cannot panic due to loop condition
let trie = &mut queue[0];
let outcome =
child::kill_storage(&child_trie_info(&trie.trie_id), Some(remaining_key_budget));
#[allow(deprecated)]
let outcome = child::kill_storage(&child_trie_info(&trie.trie_id), Some(remaining_key_budget));
let keys_removed = match outcome {
// This happens when our budget wasn't large enough to remove all keys.
KillStorageResult::SomeRemaining(count) => count,
KillStorageResult::AllRemoved(count) => {
KillStorageResult::SomeRemaining(c) => c,
KillStorageResult::AllRemoved(c) => {
// We do not care to preserve order. The contract is deleted already and
// no one waits for the trie to be deleted.
queue.swap_remove(0);
count
c
},
};
remaining_key_budget = remaining_key_budget.saturating_sub(keys_removed);
@@ -148,6 +148,7 @@ fn clean<T: Config>() {
<Members<T>>::kill();
<Candidates<T>>::kill();
<RunnersUp<T>>::kill();
#[allow(deprecated)]
<Voting<T>>::remove_all(None);
}
+2
View File
@@ -900,7 +900,9 @@ impl<T: Config> OneSessionHandler<T::AccountId> for Pallet<T> {
// Remove all received heartbeats and number of authored blocks from the
// current session, they have already been processed and won't be needed
// anymore.
#[allow(deprecated)]
ReceivedHeartbeats::<T>::remove_prefix(&T::ValidatorSet::session_index(), None);
#[allow(deprecated)]
AuthoredBlocks::<T>::remove_prefix(&T::ValidatorSet::session_index(), None);
if offenders.is_empty() {
+2
View File
@@ -565,6 +565,7 @@ impl<T: Config> Pallet<T> {
},
);
#[allow(deprecated)]
frame_support::storage::migration::remove_storage_prefix(
Self::name().as_bytes(),
b"StorageVersion",
@@ -601,6 +602,7 @@ impl<T: Config> Pallet<T> {
)
});
#[allow(deprecated)]
frame_support::storage::migration::remove_storage_prefix(
Self::name().as_bytes(),
b"StorageVersion",
+3
View File
@@ -1067,6 +1067,7 @@ pub mod pallet {
Founder::<T, I>::kill();
Rules::<T, I>::kill();
Candidates::<T, I>::kill();
#[allow(deprecated)]
SuspendedCandidates::<T, I>::remove_all(None);
Self::deposit_event(Event::<T, I>::Unfounded { founder });
Ok(())
@@ -1511,6 +1512,7 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
.collect::<Vec<_>>();
// Clean up all votes.
#[allow(deprecated)]
<Votes<T, I>>::remove_all(None);
// Reward one of the voters who voted the right way.
@@ -1695,6 +1697,7 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
}
// Clean up all votes.
#[allow(deprecated)]
<DefenderVotes<T, I>>::remove_all(None);
}
@@ -585,8 +585,11 @@ impl<T: Config> Pallet<T> {
/// Clear all era information for given era.
pub(crate) fn clear_era_information(era_index: EraIndex) {
#[allow(deprecated)]
<ErasStakers<T>>::remove_prefix(era_index, None);
#[allow(deprecated)]
<ErasStakersClipped<T>>::remove_prefix(era_index, None);
#[allow(deprecated)]
<ErasValidatorPrefs<T>>::remove_prefix(era_index, None);
<ErasValidatorReward<T>>::remove(era_index);
<ErasRewardPoints<T>>::remove(era_index);
@@ -984,9 +987,13 @@ impl<T: Config> ElectionDataProvider for Pallet<T> {
#[cfg(feature = "runtime-benchmarks")]
fn clear() {
#[allow(deprecated)]
<Bonded<T>>::remove_all(None);
#[allow(deprecated)]
<Ledger<T>>::remove_all(None);
#[allow(deprecated)]
<Validators<T>>::remove_all();
#[allow(deprecated)]
<Nominators<T>>::remove_all();
T::VoterList::unsafe_clear();
@@ -1368,7 +1375,9 @@ impl<T: Config> SortedListProvider<T::AccountId> for UseNominatorsAndValidatorsM
fn unsafe_clear() {
// NOTE: Caller must ensure this doesn't lead to too many storage accesses. This is a
// condition of SortedListProvider::unsafe_clear.
#[allow(deprecated)]
Nominators::<T>::remove_all();
#[allow(deprecated)]
Validators::<T>::remove_all();
}
}
+2
View File
@@ -557,7 +557,9 @@ impl<'a, T: 'a + Config> Drop for InspectingSpans<'a, T> {
/// Clear slashing metadata for an obsolete era.
pub(crate) fn clear_era_metadata<T: Config>(obsolete_era: EraIndex) {
#[allow(deprecated)]
<Pallet<T> as Store>::ValidatorSlashInEra::remove_prefix(&obsolete_era, None);
#[allow(deprecated)]
<Pallet<T> as Store>::NominatorSlashInEra::remove_prefix(&obsolete_era, None);
}
@@ -36,9 +36,11 @@ const SEED: u32 = 0;
/// This function removes all validators and nominators from storage.
pub fn clear_validators_and_nominators<T: Config>() {
#[allow(deprecated)]
Validators::<T>::remove_all();
// whenever we touch nominators counter we should update `T::VoterList` as well.
#[allow(deprecated)]
Nominators::<T>::remove_all();
// NOTE: safe to call outside block production
+6 -2
View File
@@ -1097,8 +1097,12 @@ pub mod tests {
DoubleMap::insert(&(key1 + 1), &key2, &4u64);
DoubleMap::insert(&(key1 + 1), &(key2 + 1), &4u64);
assert!(matches!(
DoubleMap::remove_prefix(&key1, None),
sp_io::KillStorageResult::AllRemoved(0), // all in overlay
DoubleMap::clear_prefix(&key1, u32::max_value(), None),
// Note this is the incorrect answer (for now), since we are using v2 of
// `clear_prefix`.
// When we switch to v3, then this will become:
// sp_io::MultiRemovalResults::NoneLeft { db: 0, total: 2 },
sp_io::MultiRemovalResults { maybe_cursor: None, backend: 0, unique: 0, loops: 0 },
));
assert_eq!(DoubleMap::get(&key1, &key2), 0u64);
assert_eq!(DoubleMap::get(&key1, &(key2 + 1)), 0u64);
+54 -1
View File
@@ -21,7 +21,7 @@
// NOTE: could replace unhashed by having only one kind of storage (top trie being the child info
// of null length parent storage key).
pub use crate::sp_io::KillStorageResult;
pub use crate::sp_io::{KillStorageResult, MultiRemovalResults};
use crate::sp_std::prelude::*;
use codec::{Codec, Decode, Encode};
pub use sp_core::storage::{ChildInfo, ChildType, StateVersion};
@@ -136,6 +136,7 @@ pub fn exists(child_info: &ChildInfo, key: &[u8]) -> bool {
/// not make much sense because it is not cumulative when called inside the same block.
/// Use this function to distribute the deletion of a single child trie across multiple
/// blocks.
#[deprecated = "Use `clear_storage` instead"]
pub fn kill_storage(child_info: &ChildInfo, limit: Option<u32>) -> KillStorageResult {
match child_info.child_type() {
ChildType::ParentKeyId =>
@@ -143,6 +144,58 @@ pub fn kill_storage(child_info: &ChildInfo, limit: Option<u32>) -> KillStorageRe
}
}
/// Partially clear the child storage of each key-value pair.
///
/// # Limit
///
/// A *limit* should always be provided through `maybe_limit`. This is one fewer than the
/// maximum number of backend iterations which may be done by this operation and as such
/// represents the maximum number of backend deletions which may happen. A *limit* of zero
/// implies that no keys will be deleted, though there may be a single iteration done.
///
/// The limit can be used to partially delete storage items in case it is too large or costly
/// to delete all in a single operation.
///
/// # Cursor
///
/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be
/// passed once (in the initial call) for any attempt to clear storage. In general, subsequent calls
/// operating on the same prefix should pass `Some` and this value should be equal to the
/// previous call result's `maybe_cursor` field. The only exception to this is when you can
/// guarantee that the subsequent call is in a new block; in this case the previous call's result
cursor need not be passed in and a `None` may be passed instead. This exception may be useful
when making this call solely from a block-hook such as `on_initialize`.
///
/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once the
/// resultant `maybe_cursor` field is `None`, then no further items remain to be deleted.
///
NOTE: After the initial call for any given child storage, it is important that no further
keys are inserted. If so, then they may or may not be deleted by subsequent calls.
///
/// # Note
///
/// Please note that keys which are residing in the overlay for the child are deleted without
/// counting towards the `limit`.
pub fn clear_storage(
child_info: &ChildInfo,
maybe_limit: Option<u32>,
_maybe_cursor: Option<&[u8]>,
) -> MultiRemovalResults {
// TODO: Once the network has upgraded to include the new host functions, this code can be
// enabled.
// sp_io::default_child_storage::storage_kill(prefix, maybe_limit, maybe_cursor)
let r = match child_info.child_type() {
ChildType::ParentKeyId =>
sp_io::default_child_storage::storage_kill(child_info.storage_key(), maybe_limit),
};
use sp_io::KillStorageResult::*;
let (maybe_cursor, backend) = match r {
AllRemoved(db) => (None, db),
SomeRemaining(db) => (Some(child_info.storage_key().to_vec()), db),
};
MultiRemovalResults { maybe_cursor, backend, unique: backend, loops: backend }
}
/// Ensure `key` has no explicit entry in storage.
pub fn kill(child_info: &ChildInfo, key: &[u8]) {
match child_info.child_type() {
@@ -202,11 +202,28 @@ where
unhashed::kill(&Self::storage_double_map_final_key(k1, k2))
}
fn remove_prefix<KArg1>(k1: KArg1, limit: Option<u32>) -> sp_io::KillStorageResult
fn remove_prefix<KArg1>(k1: KArg1, maybe_limit: Option<u32>) -> sp_io::KillStorageResult
where
KArg1: EncodeLike<K1>,
{
unhashed::kill_prefix(Self::storage_double_map_final_key1(k1).as_ref(), limit)
unhashed::clear_prefix(Self::storage_double_map_final_key1(k1).as_ref(), maybe_limit, None)
.into()
}
fn clear_prefix<KArg1>(
k1: KArg1,
limit: u32,
maybe_cursor: Option<&[u8]>,
) -> sp_io::MultiRemovalResults
where
KArg1: EncodeLike<K1>,
{
unhashed::clear_prefix(
Self::storage_double_map_final_key1(k1).as_ref(),
Some(limit),
maybe_cursor,
)
.into()
}
fn iter_prefix_values<KArg1>(k1: KArg1) -> storage::PrefixIterator<V>
@@ -183,7 +183,22 @@ where
where
K: HasKeyPrefix<KP>,
{
unhashed::kill_prefix(&Self::storage_n_map_partial_key(partial_key), limit)
unhashed::clear_prefix(&Self::storage_n_map_partial_key(partial_key), limit, None).into()
}
fn clear_prefix<KP>(
partial_key: KP,
limit: u32,
maybe_cursor: Option<&[u8]>,
) -> sp_io::MultiRemovalResults
where
K: HasKeyPrefix<KP>,
{
unhashed::clear_prefix(
&Self::storage_n_map_partial_key(partial_key),
Some(limit),
maybe_cursor,
)
}
fn iter_prefix_values<KP>(partial_key: KP) -> PrefixIterator<V>
@@ -256,12 +256,43 @@ pub fn put_storage_value<T: Encode>(module: &[u8], item: &[u8], hash: &[u8], val
/// Remove all items under a storage prefix by the `module`, the map's `item` name and the key
/// `hash`.
#[deprecated = "Use `clear_storage_prefix` instead"]
pub fn remove_storage_prefix(module: &[u8], item: &[u8], hash: &[u8]) {
let mut key = vec![0u8; 32 + hash.len()];
let storage_prefix = storage_prefix(module, item);
key[0..32].copy_from_slice(&storage_prefix);
key[32..].copy_from_slice(hash);
frame_support::storage::unhashed::kill_prefix(&key, None);
let _ = frame_support::storage::unhashed::clear_prefix(&key, None, None);
}
/// Attempt to remove all values under a storage prefix by the `module`, the map's `item` name and
/// the key `hash`.
///
/// All values in the client overlay will be deleted, if `maybe_limit` is `Some` then up to
/// that number of values are deleted from the client backend by seeking and reading that number of
/// storage values plus one. If `maybe_limit` is `None` then all values in the client backend are
/// deleted. This is potentially unsafe since it's an unbounded operation.
///
/// ## Cursors
///
The `maybe_cursor` parameter should be `None` for the first call, to initiate removal.
/// If the resultant `maybe_cursor` is `Some`, then another call is required to complete the
/// removal operation. This value must be passed in as the subsequent call's `maybe_cursor`
/// parameter. If the resultant `maybe_cursor` is `None`, then the operation is complete and no
items remain in storage provided that no items were added between the first call and the
/// final call.
pub fn clear_storage_prefix(
module: &[u8],
item: &[u8],
hash: &[u8],
maybe_limit: Option<u32>,
maybe_cursor: Option<&[u8]>,
) -> sp_io::MultiRemovalResults {
let mut key = vec![0u8; 32 + hash.len()];
let storage_prefix = storage_prefix(module, item);
key[0..32].copy_from_slice(&storage_prefix);
key[32..].copy_from_slice(hash);
frame_support::storage::unhashed::clear_prefix(&key, maybe_limit, maybe_cursor)
}
/// Take a particular item in storage by the `module`, the map's `item` name and the key `hash`.
+88 -4
View File
@@ -516,10 +516,34 @@ pub trait StorageDoubleMap<K1: FullEncode, K2: FullEncode, V: FullCodec> {
/// Calling this multiple times per block with a `limit` set leads always to the same keys being
/// removed and the same result being returned. This happens because the keys to delete in the
/// overlay are not taken into account when deleting keys in the backend.
#[deprecated = "Use `clear_prefix` instead"]
fn remove_prefix<KArg1>(k1: KArg1, limit: Option<u32>) -> sp_io::KillStorageResult
where
KArg1: ?Sized + EncodeLike<K1>;
/// Remove all values under the first key `k1` in the overlay and up to `maybe_limit` in the
/// backend.
///
/// All values in the client overlay will be deleted, if `maybe_limit` is `Some` then up to
/// that number of values are deleted from the client backend, otherwise all values in the
/// client backend are deleted.
///
/// ## Cursors
///
The `maybe_cursor` parameter should be `None` for the first call, to initiate removal.
/// If the resultant `maybe_cursor` is `Some`, then another call is required to complete the
/// removal operation. This value must be passed in as the subsequent call's `maybe_cursor`
/// parameter. If the resultant `maybe_cursor` is `None`, then the operation is complete and no
items remain in storage provided that no items were added between the first call and the
/// final call.
fn clear_prefix<KArg1>(
k1: KArg1,
limit: u32,
maybe_cursor: Option<&[u8]>,
) -> sp_io::MultiRemovalResults
where
KArg1: ?Sized + EncodeLike<K1>;
/// Iterate over values that share the first key.
fn iter_prefix_values<KArg1>(k1: KArg1) -> PrefixIterator<V>
where
@@ -657,10 +681,42 @@ pub trait StorageNMap<K: KeyGenerator, V: FullCodec> {
/// Calling this multiple times per block with a `limit` set leads always to the same keys being
/// removed and the same result being returned. This happens because the keys to delete in the
/// overlay are not taken into account when deleting keys in the backend.
#[deprecated = "Use `clear_prefix` instead"]
fn remove_prefix<KP>(partial_key: KP, limit: Option<u32>) -> sp_io::KillStorageResult
where
K: HasKeyPrefix<KP>;
/// Attempt to remove items from the map matching a `partial_key` prefix.
///
/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once
/// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted.
///
/// NOTE: After the initial call for any given map, it is important that no further items
/// are inserted into the map which match the `partial key`. If so, then the map may not be
/// empty when the resultant `maybe_cursor` is `None`.
///
/// # Limit
///
/// A `limit` must be provided in order to cap the maximum
/// amount of deletions done in a single call. This is one fewer than the
/// maximum number of backend iterations which may be done by this operation and as such
/// represents the maximum number of backend deletions which may happen. A `limit` of zero
/// implies that no keys will be deleted, though there may be a single iteration done.
///
/// # Cursor
///
/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be
/// passed once (in the initial call) for any given storage map and `partial_key`. Subsequent
/// calls operating on the same map/`partial_key` should always pass `Some`, and this should be
/// equal to the previous call result's `maybe_cursor` field.
fn clear_prefix<KP>(
partial_key: KP,
limit: u32,
maybe_cursor: Option<&[u8]>,
) -> sp_io::MultiRemovalResults
where
K: HasKeyPrefix<KP>;
/// Iterate over values that share the partial prefix key.
fn iter_prefix_values<KP>(partial_key: KP) -> PrefixIterator<V>
where
@@ -1111,8 +1167,36 @@ pub trait StoragePrefixedMap<Value: FullCodec> {
/// Calling this multiple times per block with a `limit` set leads always to the same keys being
/// removed and the same result being returned. This happens because the keys to delete in the
/// overlay are not taken into account when deleting keys in the backend.
#[deprecated = "Use `clear` instead"]
fn remove_all(limit: Option<u32>) -> sp_io::KillStorageResult {
sp_io::storage::clear_prefix(&Self::final_prefix(), limit)
unhashed::clear_prefix(&Self::final_prefix(), limit, None).into()
}
/// Attempt to remove all items from the map.
///
/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once
/// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted.
///
/// NOTE: After the initial call for any given map, it is important that no further items
/// are inserted into the map. If so, then the map may not be empty when the resultant
/// `maybe_cursor` is `None`.
///
/// # Limit
///
/// A `limit` must always be provided in order to cap the maximum
/// amount of deletions done in a single call. This is one fewer than the
/// maximum number of backend iterations which may be done by this operation and as such
/// represents the maximum number of backend deletions which may happen. A `limit` of zero
/// implies that no keys will be deleted, though there may be a single iteration done.
///
/// # Cursor
///
/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be
/// passed once (in the initial call) for any given storage map. Subsequent calls
/// operating on the same map should always pass `Some`, and this should be equal to the
/// previous call result's `maybe_cursor` field.
fn clear(limit: u32, maybe_cursor: Option<&[u8]>) -> sp_io::MultiRemovalResults {
unhashed::clear_prefix(&Self::final_prefix(), Some(limit), maybe_cursor)
}
/// Iter over all value of the storage.
@@ -1427,7 +1511,7 @@ mod test {
assert_eq!(MyStorage::iter_values().collect::<Vec<_>>(), vec![1, 2, 3, 4]);
// test removal
MyStorage::remove_all(None);
let _ = MyStorage::clear(u32::max_value(), None);
assert!(MyStorage::iter_values().collect::<Vec<_>>().is_empty());
// test migration
@@ -1437,7 +1521,7 @@ mod test {
assert!(MyStorage::iter_values().collect::<Vec<_>>().is_empty());
MyStorage::translate_values(|v: u32| Some(v as u64));
assert_eq!(MyStorage::iter_values().collect::<Vec<_>>(), vec![1, 2]);
MyStorage::remove_all(None);
let _ = MyStorage::clear(u32::max_value(), None);
// test migration 2
unhashed::put(&[&k[..], &vec![1][..]].concat(), &1u128);
@@ -1449,7 +1533,7 @@ mod test {
assert_eq!(MyStorage::iter_values().collect::<Vec<_>>(), vec![1, 2, 3]);
MyStorage::translate_values(|v: u128| Some(v as u64));
assert_eq!(MyStorage::iter_values().collect::<Vec<_>>(), vec![1, 2, 3]);
MyStorage::remove_all(None);
let _ = MyStorage::clear(u32::max_value(), None);
// test that other values are not modified.
assert_eq!(unhashed::get(&key_before[..]), Some(32u64));
@@ -31,6 +31,7 @@ use crate::{
Never,
};
use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen, Ref};
use sp_io::MultiRemovalResults;
use sp_runtime::traits::Saturating;
use sp_std::prelude::*;
@@ -273,13 +274,44 @@ where
<Self as MapWrapper>::Map::migrate_key::<OldHasher, _>(key)
}
/// Remove all value of the storage.
/// Remove all values in the map.
#[deprecated = "Use `clear` instead"]
pub fn remove_all() {
// NOTE: it is not possible to remove up to some limit because
// `sp_io::storage::clear_prefix` and `StorageMap::remove_all` don't give the number of
// values removed from the overlay.
CounterFor::<Prefix>::set(0u32);
#[allow(deprecated)]
<Self as MapWrapper>::Map::remove_all(None);
CounterFor::<Prefix>::kill();
}
/// Attempt to remove all items from the map.
///
/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once
/// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted.
///
/// NOTE: After the initial call for any given map, it is important that no further items
/// are inserted into the map. If so, then the map may not be empty when the resultant
/// `maybe_cursor` is `None`.
///
/// # Limit
///
/// A `limit` must always be provided in order to cap the maximum
/// amount of deletions done in a single call. This is one fewer than the
/// maximum number of backend iterations which may be done by this operation and as such
/// represents the maximum number of backend deletions which may happen. A `limit` of zero
/// implies that no keys will be deleted, though there may be a single iteration done.
///
/// # Cursor
///
/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be
/// passed once (in the initial call) for any given storage map. Subsequent calls
/// operating on the same map should always pass `Some`, and this should be equal to the
/// previous call result's `maybe_cursor` field.
pub fn clear(limit: u32, maybe_cursor: Option<&[u8]>) -> MultiRemovalResults {
let result = <Self as MapWrapper>::Map::clear(limit, maybe_cursor);
match result.maybe_cursor {
None => CounterFor::<Prefix>::kill(),
Some(_) => CounterFor::<Prefix>::mutate(|x| x.saturating_reduce(result.unique)),
}
result
}
/// Iter over all value of the storage.
@@ -691,7 +723,7 @@ mod test {
assert_eq!(A::count(), 2);
// Remove all.
A::remove_all();
let _ = A::clear(u32::max_value(), None);
assert_eq!(A::count(), 0);
assert_eq!(A::initialize_counter(), 0);
@@ -922,7 +954,7 @@ mod test {
assert_eq!(B::count(), 2);
// Remove all.
B::remove_all();
let _ = B::clear(u32::max_value(), None);
assert_eq!(B::count(), 0);
assert_eq!(B::initialize_counter(), 0);
@@ -229,13 +229,53 @@ where
/// Calling this multiple times per block with a `limit` set leads always to the same keys being
/// removed and the same result being returned. This happens because the keys to delete in the
/// overlay are not taken into account when deleting keys in the backend.
#[deprecated = "Use `clear_prefix` instead"]
pub fn remove_prefix<KArg1>(k1: KArg1, limit: Option<u32>) -> sp_io::KillStorageResult
where
KArg1: ?Sized + EncodeLike<Key1>,
{
#[allow(deprecated)]
<Self as crate::storage::StorageDoubleMap<Key1, Key2, Value>>::remove_prefix(k1, limit)
}
/// Attempt to remove items from the map matching a `first_key` prefix.
///
/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once
/// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted.
///
/// NOTE: After the initial call for any given map, it is important that no further items
/// are inserted into the map which match the `first_key`. If so, then the map may not be
/// empty when the resultant `maybe_cursor` is `None`.
///
/// # Limit
///
/// A `limit` must always be provided in order to cap the maximum
/// amount of deletions done in a single call. This is one fewer than the
/// maximum number of backend iterations which may be done by this operation and as such
/// represents the maximum number of backend deletions which may happen. A `limit` of zero
/// implies that no keys will be deleted, though there may be a single iteration done.
///
/// # Cursor
///
/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be
/// passed once (in the initial call) for any given storage map and `first_key`. Subsequent
/// calls operating on the same map/`first_key` should always pass `Some`, and this should be
/// equal to the previous call result's `maybe_cursor` field.
pub fn clear_prefix<KArg1>(
first_key: KArg1,
limit: u32,
maybe_cursor: Option<&[u8]>,
) -> sp_io::MultiRemovalResults
where
KArg1: ?Sized + EncodeLike<Key1>,
{
<Self as crate::storage::StorageDoubleMap<Key1, Key2, Value>>::clear_prefix(
first_key,
limit,
maybe_cursor,
)
}
/// Iterate over values that share the first key.
pub fn iter_prefix_values<KArg1>(k1: KArg1) -> crate::storage::PrefixIterator<Value>
where
@@ -359,10 +399,39 @@ where
/// Calling this multiple times per block with a `limit` set leads always to the same keys being
/// removed and the same result being returned. This happens because the keys to delete in the
/// overlay are not taken into account when deleting keys in the backend.
#[deprecated = "Use `clear` instead"]
pub fn remove_all(limit: Option<u32>) -> sp_io::KillStorageResult {
#[allow(deprecated)]
<Self as crate::storage::StoragePrefixedMap<Value>>::remove_all(limit)
}
/// Attempt to remove all items from the map.
///
/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once
/// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted.
///
/// NOTE: After the initial call for any given map, it is important that no further items
/// are inserted into the map. If so, then the map may not be empty when the resultant
/// `maybe_cursor` is `None`.
///
/// # Limit
///
/// A `limit` must always be provided in order to cap the maximum
/// amount of deletions done in a single call. This is one fewer than the
/// maximum number of backend iterations which may be done by this operation and as such
/// represents the maximum number of backend deletions which may happen. A `limit` of zero
/// implies that no keys will be deleted, though there may be a single iteration done.
///
/// # Cursor
///
/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be
/// passed once (in the initial call) for any given storage map. Subsequent calls
/// operating on the same map should always pass `Some`, and this should be equal to the
/// previous call result's `maybe_cursor` field.
pub fn clear(limit: u32, maybe_cursor: Option<&[u8]>) -> sp_io::MultiRemovalResults {
<Self as crate::storage::StoragePrefixedMap<Value>>::clear(limit, maybe_cursor)
}
/// Iter over all value of the storage.
///
/// NOTE: If a value failed to decode because storage is corrupted then it is skipped.
@@ -768,7 +837,7 @@ mod test {
A::insert(3, 30, 10);
A::insert(4, 40, 10);
A::remove_all(None);
let _ = A::clear(u32::max_value(), None);
assert_eq!(A::contains_key(3, 30), false);
assert_eq!(A::contains_key(4, 40), false);
@@ -829,7 +898,7 @@ mod test {
]
);
WithLen::remove_all(None);
let _ = WithLen::clear(u32::max_value(), None);
assert_eq!(WithLen::decode_len(3, 30), None);
WithLen::append(0, 100, 10);
assert_eq!(WithLen::decode_len(0, 100), Some(1));
@@ -843,7 +912,7 @@ mod test {
assert_eq!(A::iter_prefix_values(4).collect::<Vec<_>>(), vec![13, 14]);
assert_eq!(A::iter_prefix(4).collect::<Vec<_>>(), vec![(40, 13), (41, 14)]);
A::remove_prefix(3, None);
let _ = A::clear_prefix(3, u32::max_value(), None);
assert_eq!(A::iter_prefix(3).collect::<Vec<_>>(), vec![]);
assert_eq!(A::iter_prefix(4).collect::<Vec<_>>(), vec![(40, 13), (41, 14)]);
@@ -247,10 +247,39 @@ where
/// Calling this multiple times per block with a `limit` set leads always to the same keys being
/// removed and the same result being returned. This happens because the keys to delete in the
/// overlay are not taken into account when deleting keys in the backend.
#[deprecated = "Use `clear` instead"]
pub fn remove_all(limit: Option<u32>) -> sp_io::KillStorageResult {
#[allow(deprecated)]
<Self as crate::storage::StoragePrefixedMap<Value>>::remove_all(limit)
}
/// Attempt to remove all items from the map.
///
/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once
/// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted.
///
/// NOTE: After the initial call for any given map, it is important that no further items
/// are inserted into the map. If so, then the map may not be empty when the resultant
/// `maybe_cursor` is `None`.
///
/// # Limit
///
/// A `limit` must always be provided through in order to cap the maximum
/// amount of deletions done in a single call. This is one fewer than the
/// maximum number of backend iterations which may be done by this operation and as such
/// represents the maximum number of backend deletions which may happen. A `limit` of zero
/// implies that no keys will be deleted, though there may be a single iteration done.
///
/// # Cursor
///
/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be
/// passed once (in the initial call) for any given storage map. Subsequent calls
/// operating on the same map should always pass `Some`, and this should be equal to the
/// previous call result's `maybe_cursor` field.
pub fn clear(limit: u32, maybe_cursor: Option<&[u8]>) -> sp_io::MultiRemovalResults {
<Self as crate::storage::StoragePrefixedMap<Value>>::clear(limit, maybe_cursor)
}
/// Iter over all value of the storage.
///
/// NOTE: If a value failed to decode because storage is corrupted then it is skipped.
@@ -563,7 +592,7 @@ mod test {
A::insert(3, 10);
A::insert(4, 10);
A::remove_all(None);
let _ = A::clear(u32::max_value(), None);
assert_eq!(A::contains_key(3), false);
assert_eq!(A::contains_key(4), false);
@@ -618,7 +647,7 @@ mod test {
]
);
WithLen::remove_all(None);
let _ = WithLen::clear(u32::max_value(), None);
assert_eq!(WithLen::decode_len(3), None);
WithLen::append(0, 10);
assert_eq!(WithLen::decode_len(0), Some(1));
+124 -28
View File
@@ -185,13 +185,53 @@ where
/// Calling this multiple times per block with a `limit` set leads always to the same keys being
/// removed and the same result being returned. This happens because the keys to delete in the
/// overlay are not taken into account when deleting keys in the backend.
#[deprecated = "Use `clear_prefix` instead"]
pub fn remove_prefix<KP>(partial_key: KP, limit: Option<u32>) -> sp_io::KillStorageResult
where
Key: HasKeyPrefix<KP>,
{
#[allow(deprecated)]
<Self as crate::storage::StorageNMap<Key, Value>>::remove_prefix(partial_key, limit)
}
/// Attempt to remove items from the map matching a `partial_key` prefix.
///
/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once
/// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted.
///
/// NOTE: After the initial call for any given map, it is important that no further items
/// are inserted into the map which match the `partial key`. If so, then the map may not be
/// empty when the resultant `maybe_cursor` is `None`.
///
/// # Limit
///
/// A `limit` must be provided in order to cap the maximum
/// amount of deletions done in a single call. This is one fewer than the
/// maximum number of backend iterations which may be done by this operation and as such
/// represents the maximum number of backend deletions which may happen. A `limit` of zero
/// implies that no keys will be deleted, though there may be a single iteration done.
///
/// # Cursor
///
/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be
/// passed once (in the initial call) for any given storage map and `partial_key`. Subsequent
/// calls operating on the same map/`partial_key` should always pass `Some`, and this should be
/// equal to the previous call result's `maybe_cursor` field.
pub fn clear_prefix<KP>(
partial_key: KP,
limit: u32,
maybe_cursor: Option<&[u8]>,
) -> sp_io::MultiRemovalResults
where
Key: HasKeyPrefix<KP>,
{
<Self as crate::storage::StorageNMap<Key, Value>>::clear_prefix(
partial_key,
limit,
maybe_cursor,
)
}
/// Iterate over values that share the first key.
pub fn iter_prefix_values<KP>(partial_key: KP) -> PrefixIterator<Value>
where
@@ -299,8 +339,37 @@ where
/// Calling this multiple times per block with a `limit` set leads always to the same keys being
/// removed and the same result being returned. This happens because the keys to delete in the
/// overlay are not taken into account when deleting keys in the backend.
#[deprecated = "Use `clear` instead"]
pub fn remove_all(limit: Option<u32>) -> sp_io::KillStorageResult {
<Self as crate::storage::StoragePrefixedMap<Value>>::remove_all(limit)
#[allow(deprecated)]
<Self as crate::storage::StoragePrefixedMap<Value>>::remove_all(limit).into()
}
/// Attempt to remove all items from the map.
///
/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once
/// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted.
///
/// NOTE: After the initial call for any given map, it is important that no further items
/// are inserted into the map. If so, then the map may not be empty when the resultant
/// `maybe_cursor` is `None`.
///
/// # Limit
///
/// A `limit` must always be provided through in order to cap the maximum
/// amount of deletions done in a single call. This is one fewer than the
/// maximum number of backend iterations which may be done by this operation and as such
/// represents the maximum number of backend deletions which may happen. A `limit` of zero
/// implies that no keys will be deleted, though there may be a single iteration done.
///
/// # Cursor
///
/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be
/// passed once (in the initial call) for any given storage map. Subsequent calls
/// operating on the same map should always pass `Some`, and this should be equal to the
/// previous call result's `maybe_cursor` field.
pub fn clear(limit: u32, maybe_cursor: Option<&[u8]>) -> sp_io::MultiRemovalResults {
<Self as crate::storage::StoragePrefixedMap<Value>>::clear(limit, maybe_cursor)
}
/// Iter over all value of the storage.
@@ -544,7 +613,7 @@ mod test {
use crate::{
hash::{StorageHasher as _, *},
metadata::{StorageEntryModifier, StorageHasher},
storage::types::{Key, ValueQuery},
storage::types::{Key as NMapKey, ValueQuery},
};
use sp_io::{hashing::twox_128, TestExternalities};
@@ -565,12 +634,12 @@ mod test {
#[test]
fn test_1_key() {
type A = StorageNMap<Prefix, Key<Blake2_128Concat, u16>, u32, OptionQuery>;
type A = StorageNMap<Prefix, NMapKey<Blake2_128Concat, u16>, u32, OptionQuery>;
type AValueQueryWithAnOnEmpty =
StorageNMap<Prefix, Key<Blake2_128Concat, u16>, u32, ValueQuery, ADefault>;
type B = StorageNMap<Prefix, Key<Blake2_256, u16>, u32, ValueQuery>;
type C = StorageNMap<Prefix, Key<Blake2_128Concat, u16>, u8, ValueQuery>;
type WithLen = StorageNMap<Prefix, Key<Blake2_128Concat, u16>, Vec<u32>>;
StorageNMap<Prefix, NMapKey<Blake2_128Concat, u16>, u32, ValueQuery, ADefault>;
type B = StorageNMap<Prefix, NMapKey<Blake2_256, u16>, u32, ValueQuery>;
type C = StorageNMap<Prefix, NMapKey<Blake2_128Concat, u16>, u8, ValueQuery>;
type WithLen = StorageNMap<Prefix, NMapKey<Blake2_128Concat, u16>, Vec<u32>>;
TestExternalities::default().execute_with(|| {
let mut k: Vec<u8> = vec![];
@@ -590,13 +659,13 @@ mod test {
{
#[crate::storage_alias]
type Foo = StorageNMap<test, (Key<Blake2_128Concat, u16>), u32>;
type Foo = StorageNMap<test, (NMapKey<Blake2_128Concat, u16>), u32>;
assert_eq!(Foo::contains_key((3,)), true);
assert_eq!(Foo::get((3,)), Some(10));
}
A::swap::<Key<Blake2_128Concat, u16>, _, _>((3,), (2,));
A::swap::<NMapKey<Blake2_128Concat, u16>, _, _>((3,), (2,));
assert_eq!(A::contains_key((3,)), false);
assert_eq!(A::contains_key((2,)), true);
assert_eq!(A::get((3,)), None);
@@ -684,7 +753,7 @@ mod test {
A::insert((3,), 10);
A::insert((4,), 10);
A::remove_all(None);
let _ = A::clear(u32::max_value(), None);
assert_eq!(A::contains_key((3,)), false);
assert_eq!(A::contains_key((4,)), false);
@@ -739,7 +808,7 @@ mod test {
]
);
WithLen::remove_all(None);
let _ = WithLen::clear(u32::max_value(), None);
assert_eq!(WithLen::decode_len((3,)), None);
WithLen::append((0,), 10);
assert_eq!(WithLen::decode_len((0,)), Some(1));
@@ -750,26 +819,30 @@ mod test {
fn test_2_keys() {
type A = StorageNMap<
Prefix,
(Key<Blake2_128Concat, u16>, Key<Twox64Concat, u8>),
(NMapKey<Blake2_128Concat, u16>, NMapKey<Twox64Concat, u8>),
u32,
OptionQuery,
>;
type AValueQueryWithAnOnEmpty = StorageNMap<
Prefix,
(Key<Blake2_128Concat, u16>, Key<Twox64Concat, u8>),
(NMapKey<Blake2_128Concat, u16>, NMapKey<Twox64Concat, u8>),
u32,
ValueQuery,
ADefault,
>;
type B = StorageNMap<Prefix, (Key<Blake2_256, u16>, Key<Twox128, u8>), u32, ValueQuery>;
type B =
StorageNMap<Prefix, (NMapKey<Blake2_256, u16>, NMapKey<Twox128, u8>), u32, ValueQuery>;
type C = StorageNMap<
Prefix,
(Key<Blake2_128Concat, u16>, Key<Twox64Concat, u8>),
(NMapKey<Blake2_128Concat, u16>, NMapKey<Twox64Concat, u8>),
u8,
ValueQuery,
>;
type WithLen =
StorageNMap<Prefix, (Key<Blake2_128Concat, u16>, Key<Twox64Concat, u8>), Vec<u32>>;
type WithLen = StorageNMap<
Prefix,
(NMapKey<Blake2_128Concat, u16>, NMapKey<Twox64Concat, u8>),
Vec<u32>,
>;
TestExternalities::default().execute_with(|| {
let mut k: Vec<u8> = vec![];
@@ -788,7 +861,10 @@ mod test {
assert_eq!(A::get((3, 30)), Some(10));
assert_eq!(AValueQueryWithAnOnEmpty::get((3, 30)), 10);
A::swap::<(Key<Blake2_128Concat, u16>, Key<Twox64Concat, u8>), _, _>((3, 30), (2, 20));
A::swap::<(NMapKey<Blake2_128Concat, u16>, NMapKey<Twox64Concat, u8>), _, _>(
(3, 30),
(2, 20),
);
assert_eq!(A::contains_key((3, 30)), false);
assert_eq!(A::contains_key((2, 20)), true);
assert_eq!(A::get((3, 30)), None);
@@ -877,7 +953,7 @@ mod test {
A::insert((3, 30), 10);
A::insert((4, 40), 10);
A::remove_all(None);
let _ = A::clear(u32::max_value(), None);
assert_eq!(A::contains_key((3, 30)), false);
assert_eq!(A::contains_key((4, 40)), false);
@@ -938,7 +1014,7 @@ mod test {
]
);
WithLen::remove_all(None);
let _ = WithLen::clear(u32::max_value(), None);
assert_eq!(WithLen::decode_len((3, 30)), None);
WithLen::append((0, 100), 10);
assert_eq!(WithLen::decode_len((0, 100)), Some(1));
@@ -956,32 +1032,48 @@ mod test {
fn test_3_keys() {
type A = StorageNMap<
Prefix,
(Key<Blake2_128Concat, u16>, Key<Blake2_128Concat, u16>, Key<Twox64Concat, u16>),
(
NMapKey<Blake2_128Concat, u16>,
NMapKey<Blake2_128Concat, u16>,
NMapKey<Twox64Concat, u16>,
),
u32,
OptionQuery,
>;
type AValueQueryWithAnOnEmpty = StorageNMap<
Prefix,
(Key<Blake2_128Concat, u16>, Key<Blake2_128Concat, u16>, Key<Twox64Concat, u16>),
(
NMapKey<Blake2_128Concat, u16>,
NMapKey<Blake2_128Concat, u16>,
NMapKey<Twox64Concat, u16>,
),
u32,
ValueQuery,
ADefault,
>;
type B = StorageNMap<
Prefix,
(Key<Blake2_256, u16>, Key<Blake2_256, u16>, Key<Twox128, u16>),
(NMapKey<Blake2_256, u16>, NMapKey<Blake2_256, u16>, NMapKey<Twox128, u16>),
u32,
ValueQuery,
>;
type C = StorageNMap<
Prefix,
(Key<Blake2_128Concat, u16>, Key<Blake2_128Concat, u16>, Key<Twox64Concat, u16>),
(
NMapKey<Blake2_128Concat, u16>,
NMapKey<Blake2_128Concat, u16>,
NMapKey<Twox64Concat, u16>,
),
u8,
ValueQuery,
>;
type WithLen = StorageNMap<
Prefix,
(Key<Blake2_128Concat, u16>, Key<Blake2_128Concat, u16>, Key<Twox64Concat, u16>),
(
NMapKey<Blake2_128Concat, u16>,
NMapKey<Blake2_128Concat, u16>,
NMapKey<Twox64Concat, u16>,
),
Vec<u32>,
>;
@@ -1004,7 +1096,11 @@ mod test {
assert_eq!(AValueQueryWithAnOnEmpty::get((1, 10, 100)), 30);
A::swap::<
(Key<Blake2_128Concat, u16>, Key<Blake2_128Concat, u16>, Key<Twox64Concat, u16>),
(
NMapKey<Blake2_128Concat, u16>,
NMapKey<Blake2_128Concat, u16>,
NMapKey<Twox64Concat, u16>,
),
_,
_,
>((1, 10, 100), (2, 20, 200));
@@ -1093,7 +1189,7 @@ mod test {
A::insert((3, 30, 300), 10);
A::insert((4, 40, 400), 10);
A::remove_all(None);
let _ = A::clear(u32::max_value(), None);
assert_eq!(A::contains_key((3, 30, 300)), false);
assert_eq!(A::contains_key((4, 40, 400)), false);
@@ -1161,7 +1257,7 @@ mod test {
]
);
WithLen::remove_all(None);
let _ = WithLen::clear(u32::max_value(), None);
assert_eq!(WithLen::decode_len((3, 30, 300)), None);
WithLen::append((0, 100, 1000), 10);
assert_eq!(WithLen::decode_len((0, 100, 1000)), Some(1));
@@ -96,10 +96,63 @@ pub fn kill(key: &[u8]) {
}
/// Ensure keys with the given `prefix` have no entries in storage.
#[deprecated = "Use `clear_prefix` instead"]
pub fn kill_prefix(prefix: &[u8], limit: Option<u32>) -> sp_io::KillStorageResult {
// TODO: Once the network has upgraded to include the new host functions, this code can be
// enabled.
// clear_prefix(prefix, limit).into()
sp_io::storage::clear_prefix(prefix, limit)
}
/// Partially clear the storage of all keys under a common `prefix`.
///
/// # Limit
///
/// A *limit* should always be provided through `maybe_limit`. This is one fewer than the
/// maximum number of backend iterations which may be done by this operation and as such
/// represents the maximum number of backend deletions which may happen. A *limit* of zero
/// implies that no keys will be deleted, though there may be a single iteration done.
///
/// The limit can be used to partially delete storage items in case it is too large or costly
/// to delete all in a single operation.
///
/// # Cursor
///
/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be
/// passed once (in the initial call) for any attempt to clear storage. In general, subsequent calls
/// operating on the same prefix should pass `Some` and this value should be equal to the
/// previous call result's `maybe_cursor` field. The only exception to this is when you can
/// guarantee that the subsequent call is in a new block; in this case the previous call's result
/// cursor need not be passed in an a `None` may be passed instead. This exception may be useful
/// then making this call solely from a block-hook such as `on_initialize`.
///
/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once the
/// resultant `maybe_cursor` field is `None`, then no further items remain to be deleted.
///
/// NOTE: After the initial call for any given child storage, it is important that no keys further
/// keys are inserted. If so, then they may or may not be deleted by subsequent calls.
///
/// # Note
///
/// Please note that keys which are residing in the overlay for the child are deleted without
/// counting towards the `limit`.
pub fn clear_prefix(
prefix: &[u8],
maybe_limit: Option<u32>,
_maybe_cursor: Option<&[u8]>,
) -> sp_io::MultiRemovalResults {
// TODO: Once the network has upgraded to include the new host functions, this code can be
// enabled.
// sp_io::storage::clear_prefix(prefix, maybe_limit, maybe_cursor)
use sp_io::{KillStorageResult::*, MultiRemovalResults};
#[allow(deprecated)]
let (maybe_cursor, i) = match kill_prefix(prefix, maybe_limit) {
AllRemoved(i) => (None, i),
SomeRemaining(i) => (Some(prefix.to_vec()), i),
};
MultiRemovalResults { maybe_cursor, backend: i, unique: i, loops: i }
}
/// Get a Vec of bytes from storage.
pub fn get_raw(key: &[u8]) -> Option<Vec<u8>> {
sp_io::storage::get(key)
+2 -2
View File
@@ -475,7 +475,7 @@ pub mod pallet {
_subkeys: u32,
) -> DispatchResultWithPostInfo {
ensure_root(origin)?;
storage::unhashed::kill_prefix(&prefix, None);
let _ = storage::unhashed::clear_prefix(&prefix, None, None);
Ok(().into())
}
@@ -1427,7 +1427,7 @@ impl<T: Config> Pallet<T> {
pub fn reset_events() {
<Events<T>>::kill();
EventCount::<T>::kill();
<EventTopics<T>>::remove_all(None);
let _ = <EventTopics<T>>::clear(u32::max_value(), None);
}
/// Assert the given `event` exists.
+2
View File
@@ -110,8 +110,10 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
for (item, details) in Item::<T, I>::drain_prefix(&collection) {
Account::<T, I>::remove((&details.owner, &collection, &item));
}
#[allow(deprecated)]
ItemMetadataOf::<T, I>::remove_prefix(&collection, None);
CollectionMetadataOf::<T, I>::remove(&collection);
#[allow(deprecated)]
Attribute::<T, I>::remove_prefix((&collection,), None);
CollectionAccount::<T, I>::remove(&collection_details.owner, &collection);
T::Currency::unreserve(&collection_details.owner, collection_details.total_deposit);
+50 -11
View File
@@ -52,6 +52,30 @@ pub enum Error {
StorageUpdateFailed(&'static str),
}
/// Results concerning an operation to remove many keys.
#[derive(codec::Encode, codec::Decode)]
#[must_use]
pub struct MultiRemovalResults {
/// A continuation cursor which, if `Some` must be provided to the subsequent removal call.
/// If `None` then all removals are complete and no further calls are needed.
pub maybe_cursor: Option<Vec<u8>>,
/// The number of items removed from the backend database.
pub backend: u32,
/// The number of unique keys removed, taking into account both the backend and the overlay.
pub unique: u32,
/// The number of iterations (each requiring a storage seek/read) which were done.
pub loops: u32,
}
impl MultiRemovalResults {
/// Deconstruct into the internal components.
///
/// Returns `(maybe_cursor, backend, unique, loops)`.
pub fn deconstruct(self) -> (Option<Vec<u8>>, u32, u32, u32) {
(self.maybe_cursor, self.backend, self.unique, self.loops)
}
}
/// The Substrate externalities.
///
/// Provides access to the storage and to other registered extensions.
@@ -118,32 +142,47 @@ pub trait Externalities: ExtensionStore {
/// Clear an entire child storage.
///
/// Deletes all keys from the overlay and up to `limit` keys from the backend. No
/// limit is applied if `limit` is `None`. Returned boolean is `true` if the child trie was
/// removed completely and `false` if there are remaining keys after the function
/// returns. Returned `u32` is the number of keys that was removed at the end of the
/// operation.
/// Deletes all keys from the overlay and up to `maybe_limit` keys from the backend. No
/// limit is applied if `maybe_limit` is `None`. Returns the cursor for the next call as `Some`
/// if the child trie deletion operation is incomplete. In this case, it should be passed into
/// the next call to avoid unaccounted iterations on the backend. Returns also the the number
/// of keys that were removed from the backend, the number of unique keys removed in total
/// (including from the overlay) and the number of backend iterations done.
///
/// As long as `maybe_cursor` is passed from the result of the previous call, then the number of
/// iterations done will only ever be one more than the number of keys removed.
///
/// # Note
///
/// An implementation is free to delete more keys than the specified limit as long as
/// it is able to do that in constant time.
fn kill_child_storage(&mut self, child_info: &ChildInfo, limit: Option<u32>) -> (bool, u32);
fn kill_child_storage(
&mut self,
child_info: &ChildInfo,
maybe_limit: Option<u32>,
maybe_cursor: Option<&[u8]>,
) -> MultiRemovalResults;
/// Clear storage entries which keys are start with the given prefix.
///
/// `limit` and result works as for `kill_child_storage`.
fn clear_prefix(&mut self, prefix: &[u8], limit: Option<u32>) -> (bool, u32);
/// `maybe_limit`, `maybe_cursor` and result works as for `kill_child_storage`.
fn clear_prefix(
&mut self,
prefix: &[u8],
maybe_limit: Option<u32>,
maybe_cursor: Option<&[u8]>,
) -> MultiRemovalResults;
/// Clear child storage entries which keys are start with the given prefix.
///
/// `limit` and result works as for `kill_child_storage`.
/// `maybe_limit`, `maybe_cursor` and result works as for `kill_child_storage`.
fn clear_child_prefix(
&mut self,
child_info: &ChildInfo,
prefix: &[u8],
limit: Option<u32>,
) -> (bool, u32);
maybe_limit: Option<u32>,
maybe_cursor: Option<&[u8]>,
) -> MultiRemovalResults;
/// Set or clear a storage entry (`key`) of current contract being called (effective
/// immediately).
+2 -2
View File
@@ -25,7 +25,7 @@ sp-state-machine = { version = "0.12.0", optional = true, path = "../state-machi
sp-wasm-interface = { version = "6.0.0", path = "../wasm-interface", default-features = false }
sp-runtime-interface = { version = "6.0.0", default-features = false, path = "../runtime-interface" }
sp-trie = { version = "6.0.0", optional = true, path = "../trie" }
sp-externalities = { version = "0.12.0", optional = true, path = "../externalities" }
sp-externalities = { version = "0.12.0", default-features = false, path = "../externalities" }
sp-tracing = { version = "5.0.0", default-features = false, path = "../tracing" }
log = { version = "0.4.17", optional = true }
futures = { version = "0.3.21", features = ["thread-pool"], optional = true }
@@ -37,6 +37,7 @@ tracing-core = { version = "0.1.26", default-features = false}
[features]
default = ["std"]
std = [
"sp-externalities/std",
"sp-core/std",
"sp-keystore",
"codec/std",
@@ -47,7 +48,6 @@ std = [
"libsecp256k1",
"secp256k1",
"sp-runtime-interface/std",
"sp-externalities",
"sp-wasm-interface/std",
"sp-tracing/std",
"tracing/std",
+132 -20
View File
@@ -81,6 +81,8 @@ mod batch_verifier;
#[cfg(feature = "std")]
use batch_verifier::BatchVerifier;
pub use sp_externalities::MultiRemovalResults;
#[cfg(feature = "std")]
const LOG_TARGET: &str = "runtime::io";
@@ -99,12 +101,24 @@ pub enum EcdsaVerifyError {
/// removed from the backend from making the `storage_kill` call.
#[derive(PassByCodec, Encode, Decode)]
pub enum KillStorageResult {
/// All key to remove were removed, return number of key removed from backend.
/// All keys to remove were removed, return number of iterations performed during the
/// operation.
AllRemoved(u32),
/// Not all key to remove were removed, return number of key removed from backend.
/// Not all key to remove were removed, return number of iterations performed during the
/// operation.
SomeRemaining(u32),
}
impl From<MultiRemovalResults> for KillStorageResult {
fn from(r: MultiRemovalResults) -> Self {
match r {
MultiRemovalResults { maybe_cursor: None, backend, .. } => Self::AllRemoved(backend),
MultiRemovalResults { maybe_cursor: Some(..), backend, .. } =>
Self::SomeRemaining(backend),
}
}
}
/// Interface for accessing the storage from within the runtime.
#[runtime_interface]
pub trait Storage {
@@ -145,7 +159,7 @@ pub trait Storage {
/// Clear the storage of each key-value pair where the key starts with the given `prefix`.
fn clear_prefix(&mut self, prefix: &[u8]) {
let _ = Externalities::clear_prefix(*self, prefix, None);
let _ = Externalities::clear_prefix(*self, prefix, None, None);
}
/// Clear the storage of each key-value pair where the key starts with the given `prefix`.
@@ -175,13 +189,60 @@ pub trait Storage {
/// backend.
#[version(2)]
fn clear_prefix(&mut self, prefix: &[u8], limit: Option<u32>) -> KillStorageResult {
let (all_removed, num_removed) = Externalities::clear_prefix(*self, prefix, limit);
match all_removed {
true => KillStorageResult::AllRemoved(num_removed),
false => KillStorageResult::SomeRemaining(num_removed),
let r = Externalities::clear_prefix(*self, prefix, limit, None);
match r.maybe_cursor {
None => KillStorageResult::AllRemoved(r.loops),
Some(_) => KillStorageResult::SomeRemaining(r.loops),
}
}
/// Partially clear the storage of each key-value pair where the key starts with the given
/// prefix.
///
/// # Limit
///
/// A *limit* should always be provided through `maybe_limit`. This is one fewer than the
/// maximum number of backend iterations which may be done by this operation and as such
/// represents the maximum number of backend deletions which may happen. A *limit* of zero
/// implies that no keys will be deleted, though there may be a single iteration done.
///
/// The limit can be used to partially delete a prefix storage in case it is too large or costly
/// to delete in a single operation.
///
/// # Cursor
///
/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be
/// passed once (in the initial call) for any given `maybe_prefix` value. Subsequent calls
/// operating on the same prefix should always pass `Some`, and this should be equal to the
/// previous call result's `maybe_cursor` field.
///
/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once
/// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted.
///
/// NOTE: After the initial call for any given prefix, it is important that no keys further
/// keys under the same prefix are inserted. If so, then they may or may not be deleted by
/// subsequent calls.
///
/// # Note
///
/// Please note that keys which are residing in the overlay for that prefix when
/// issuing this call are deleted without counting towards the `limit`.
#[version(3, register_only)]
fn clear_prefix(
&mut self,
maybe_prefix: &[u8],
maybe_limit: Option<u32>,
maybe_cursor: Option<Vec<u8>>, //< TODO Make work or just Option<Vec<u8>>?
) -> MultiRemovalResults {
Externalities::clear_prefix(
*self,
maybe_prefix,
maybe_limit,
maybe_cursor.as_ref().map(|x| &x[..]),
)
.into()
}
/// Append the encoded `value` to the storage item at `key`.
///
/// The storage item needs to implement [`EncodeAppend`](codec::EncodeAppend).
@@ -323,7 +384,7 @@ pub trait DefaultChildStorage {
/// is removed.
fn storage_kill(&mut self, storage_key: &[u8]) {
let child_info = ChildInfo::new_default(storage_key);
self.kill_child_storage(&child_info, None);
let _ = self.kill_child_storage(&child_info, None, None);
}
/// Clear a child storage key.
@@ -332,8 +393,8 @@ pub trait DefaultChildStorage {
#[version(2)]
fn storage_kill(&mut self, storage_key: &[u8], limit: Option<u32>) -> bool {
let child_info = ChildInfo::new_default(storage_key);
let (all_removed, _num_removed) = self.kill_child_storage(&child_info, limit);
all_removed
let r = self.kill_child_storage(&child_info, limit, None);
r.maybe_cursor.is_none()
}
/// Clear a child storage key.
@@ -342,13 +403,28 @@ pub trait DefaultChildStorage {
#[version(3)]
fn storage_kill(&mut self, storage_key: &[u8], limit: Option<u32>) -> KillStorageResult {
let child_info = ChildInfo::new_default(storage_key);
let (all_removed, num_removed) = self.kill_child_storage(&child_info, limit);
match all_removed {
true => KillStorageResult::AllRemoved(num_removed),
false => KillStorageResult::SomeRemaining(num_removed),
let r = self.kill_child_storage(&child_info, limit, None);
match r.maybe_cursor {
None => KillStorageResult::AllRemoved(r.loops),
Some(..) => KillStorageResult::SomeRemaining(r.loops),
}
}
/// Clear a child storage key.
///
/// See `Storage` module `clear_prefix` documentation for `limit` usage.
#[version(4, register_only)]
fn storage_kill(
&mut self,
storage_key: &[u8],
maybe_limit: Option<u32>,
maybe_cursor: Option<Vec<u8>>,
) -> MultiRemovalResults {
let child_info = ChildInfo::new_default(storage_key);
self.kill_child_storage(&child_info, maybe_limit, maybe_cursor.as_ref().map(|x| &x[..]))
.into()
}
/// Check a child storage key.
///
/// Check whether the given `key` exists in default child defined at `storage_key`.
@@ -362,7 +438,7 @@ pub trait DefaultChildStorage {
/// Clear the child storage of each key-value pair where the key starts with the given `prefix`.
fn clear_prefix(&mut self, storage_key: &[u8], prefix: &[u8]) {
let child_info = ChildInfo::new_default(storage_key);
let _ = self.clear_child_prefix(&child_info, prefix, None);
let _ = self.clear_child_prefix(&child_info, prefix, None, None);
}
/// Clear the child storage of each key-value pair where the key starts with the given `prefix`.
@@ -376,13 +452,34 @@ pub trait DefaultChildStorage {
limit: Option<u32>,
) -> KillStorageResult {
let child_info = ChildInfo::new_default(storage_key);
let (all_removed, num_removed) = self.clear_child_prefix(&child_info, prefix, limit);
match all_removed {
true => KillStorageResult::AllRemoved(num_removed),
false => KillStorageResult::SomeRemaining(num_removed),
let r = self.clear_child_prefix(&child_info, prefix, limit, None);
match r.maybe_cursor {
None => KillStorageResult::AllRemoved(r.loops),
Some(..) => KillStorageResult::SomeRemaining(r.loops),
}
}
/// Clear the child storage of each key-value pair where the key starts with the given `prefix`.
///
/// See `Storage` module `clear_prefix` documentation for `limit` usage.
#[version(3, register_only)]
fn clear_prefix(
&mut self,
storage_key: &[u8],
prefix: &[u8],
maybe_limit: Option<u32>,
maybe_cursor: Option<Vec<u8>>,
) -> MultiRemovalResults {
let child_info = ChildInfo::new_default(storage_key);
self.clear_child_prefix(
&child_info,
prefix,
maybe_limit,
maybe_cursor.as_ref().map(|x| &x[..]),
)
.into()
}
/// Default child root calculation.
///
/// "Commit" all existing operations and compute the resulting child storage root.
@@ -1757,15 +1854,30 @@ mod tests {
});
t.execute_with(|| {
// We can switch to this once we enable v3 of the `clear_prefix`.
//assert!(matches!(
// storage::clear_prefix(b":abc", None),
// MultiRemovalResults::NoneLeft { db: 2, total: 2 }
//));
assert!(matches!(
storage::clear_prefix(b":abc", None),
KillStorageResult::AllRemoved(2)
KillStorageResult::AllRemoved(2),
));
assert!(storage::get(b":a").is_some());
assert!(storage::get(b":abdd").is_some());
assert!(storage::get(b":abcd").is_none());
assert!(storage::get(b":abc").is_none());
// We can switch to this once we enable v3 of the `clear_prefix`.
//assert!(matches!(
// storage::clear_prefix(b":abc", None),
// MultiRemovalResults::NoneLeft { db: 0, total: 0 }
//));
assert!(matches!(
storage::clear_prefix(b":abc", None),
KillStorageResult::AllRemoved(0),
));
});
}
@@ -18,7 +18,7 @@ sp-wasm-interface = { version = "6.0.0", path = "../wasm-interface", default-fea
sp-std = { version = "4.0.0", default-features = false, path = "../std" }
sp-tracing = { version = "5.0.0", default-features = false, path = "../tracing" }
sp-runtime-interface-proc-macro = { version = "5.0.0", path = "proc-macro" }
sp-externalities = { version = "0.12.0", optional = true, path = "../externalities" }
sp-externalities = { version = "0.12.0", default-features = false, path = "../externalities" }
codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false }
static_assertions = "1.0.0"
primitive-types = { version = "0.11.1", default-features = false }
@@ -40,7 +40,7 @@ std = [
"sp-std/std",
"sp-tracing/std",
"codec/std",
"sp-externalities",
"sp-externalities/std",
"primitive-types/std",
]
@@ -548,3 +548,7 @@ impl PassBy for sp_storage::TrackedStorageKey {
impl PassBy for sp_storage::StateVersion {
type PassBy = Enum<Self>;
}
impl PassBy for sp_externalities::MultiRemovalResults {
type PassBy = Codec<Self>;
}
@@ -35,6 +35,7 @@ hex-literal = "0.3.4"
pretty_assertions = "1.2.1"
rand = "0.7.2"
sp-runtime = { version = "6.0.0", path = "../runtime" }
assert_matches = "1.5"
[features]
default = ["std"]
@@ -112,6 +112,7 @@ pub trait Backend<H: Hasher>: sp_std::fmt::Debug {
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
);
+29 -17
View File
@@ -29,7 +29,7 @@ use sp_core::{
traits::Externalities,
Blake2Hasher,
};
use sp_externalities::{Extension, Extensions};
use sp_externalities::{Extension, Extensions, MultiRemovalResults};
use sp_trie::{empty_child_trie_root, HashKey, LayoutV0, LayoutV1, TrieConfiguration};
use std::{
any::{Any, TypeId},
@@ -209,23 +209,34 @@ impl Externalities for BasicExternalities {
}
}
fn kill_child_storage(&mut self, child_info: &ChildInfo, _limit: Option<u32>) -> (bool, u32) {
let num_removed = self
fn kill_child_storage(
&mut self,
child_info: &ChildInfo,
_maybe_limit: Option<u32>,
_maybe_cursor: Option<&[u8]>,
) -> MultiRemovalResults {
let count = self
.inner
.children_default
.remove(child_info.storage_key())
.map(|c| c.data.len())
.unwrap_or(0);
(true, num_removed as u32)
.unwrap_or(0) as u32;
MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count }
}
fn clear_prefix(&mut self, prefix: &[u8], _limit: Option<u32>) -> (bool, u32) {
fn clear_prefix(
&mut self,
prefix: &[u8],
_maybe_limit: Option<u32>,
_maybe_cursor: Option<&[u8]>,
) -> MultiRemovalResults {
if is_child_storage_key(prefix) {
warn!(
target: "trie",
"Refuse to clear prefix that is part of child storage key via main storage"
);
return (false, 0)
let maybe_cursor = Some(prefix.to_vec());
return MultiRemovalResults { maybe_cursor, backend: 0, unique: 0, loops: 0 }
}
let to_remove = self
@@ -237,19 +248,20 @@ impl Externalities for BasicExternalities {
.cloned()
.collect::<Vec<_>>();
let num_removed = to_remove.len();
let count = to_remove.len() as u32;
for key in to_remove {
self.inner.top.remove(&key);
}
(true, num_removed as u32)
MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count }
}
fn clear_child_prefix(
&mut self,
child_info: &ChildInfo,
prefix: &[u8],
_limit: Option<u32>,
) -> (bool, u32) {
_maybe_limit: Option<u32>,
_maybe_cursor: Option<&[u8]>,
) -> MultiRemovalResults {
if let Some(child) = self.inner.children_default.get_mut(child_info.storage_key()) {
let to_remove = child
.data
@@ -259,13 +271,13 @@ impl Externalities for BasicExternalities {
.cloned()
.collect::<Vec<_>>();
let num_removed = to_remove.len();
let count = to_remove.len() as u32;
for key in to_remove {
child.data.remove(&key);
}
(true, num_removed as u32)
MultiRemovalResults { maybe_cursor: None, backend: count, unique: count, loops: count }
} else {
(true, 0)
MultiRemovalResults { maybe_cursor: None, backend: 0, unique: 0, loops: 0 }
}
}
@@ -434,7 +446,7 @@ mod tests {
ext.clear_child_storage(child_info, b"dog");
assert_eq!(ext.child_storage(child_info, b"dog"), None);
ext.kill_child_storage(child_info, None);
let _ = ext.kill_child_storage(child_info, None, None);
assert_eq!(ext.child_storage(child_info, b"doe"), None);
}
@@ -456,8 +468,8 @@ mod tests {
],
});
let res = ext.kill_child_storage(child_info, None);
assert_eq!(res, (true, 3));
let res = ext.kill_child_storage(child_info, None, None);
assert_eq!(res.deconstruct(), (None, 3, 3, 3));
}
#[test]
+63 -50
View File
@@ -27,7 +27,7 @@ use sp_core::hexdisplay::HexDisplay;
use sp_core::storage::{
well_known_keys::is_child_storage_key, ChildInfo, StateVersion, TrackedStorageKey,
};
use sp_externalities::{Extension, ExtensionStore, Externalities};
use sp_externalities::{Extension, ExtensionStore, Externalities, MultiRemovalResults};
use sp_trie::{empty_child_trie_root, LayoutV1};
use crate::{log_error, trace, warn, StorageTransactionCache};
@@ -433,7 +433,12 @@ where
self.overlay.set_child_storage(child_info, key, value);
}
fn kill_child_storage(&mut self, child_info: &ChildInfo, limit: Option<u32>) -> (bool, u32) {
fn kill_child_storage(
&mut self,
child_info: &ChildInfo,
maybe_limit: Option<u32>,
maybe_cursor: Option<&[u8]>,
) -> MultiRemovalResults {
trace!(
target: "state",
method = "ChildKill",
@@ -442,11 +447,18 @@ where
);
let _guard = guard();
self.mark_dirty();
self.overlay.clear_child_storage(child_info);
self.limit_remove_from_backend(Some(child_info), None, limit)
let overlay = self.overlay.clear_child_storage(child_info);
let (maybe_cursor, backend, loops) =
self.limit_remove_from_backend(Some(child_info), None, maybe_limit, maybe_cursor);
MultiRemovalResults { maybe_cursor, backend, unique: overlay + backend, loops }
}
fn clear_prefix(&mut self, prefix: &[u8], limit: Option<u32>) -> (bool, u32) {
fn clear_prefix(
&mut self,
prefix: &[u8],
maybe_limit: Option<u32>,
maybe_cursor: Option<&[u8]>,
) -> MultiRemovalResults {
trace!(
target: "state",
method = "ClearPrefix",
@@ -460,20 +472,23 @@ where
target: "trie",
"Refuse to directly clear prefix that is part or contains of child storage key",
);
return (false, 0)
return MultiRemovalResults { maybe_cursor: None, backend: 0, unique: 0, loops: 0 }
}
self.mark_dirty();
self.overlay.clear_prefix(prefix);
self.limit_remove_from_backend(None, Some(prefix), limit)
let overlay = self.overlay.clear_prefix(prefix);
let (maybe_cursor, backend, loops) =
self.limit_remove_from_backend(None, Some(prefix), maybe_limit, maybe_cursor);
MultiRemovalResults { maybe_cursor, backend, unique: overlay + backend, loops }
}
fn clear_child_prefix(
&mut self,
child_info: &ChildInfo,
prefix: &[u8],
limit: Option<u32>,
) -> (bool, u32) {
maybe_limit: Option<u32>,
maybe_cursor: Option<&[u8]>,
) -> MultiRemovalResults {
trace!(
target: "state",
method = "ChildClearPrefix",
@@ -484,8 +499,14 @@ where
let _guard = guard();
self.mark_dirty();
self.overlay.clear_child_prefix(child_info, prefix);
self.limit_remove_from_backend(Some(child_info), Some(prefix), limit)
let overlay = self.overlay.clear_child_prefix(child_info, prefix);
let (maybe_cursor, backend, loops) = self.limit_remove_from_backend(
Some(child_info),
Some(prefix),
maybe_limit,
maybe_cursor,
);
MultiRemovalResults { maybe_cursor, backend, unique: overlay + backend, loops }
}
fn storage_append(&mut self, key: Vec<u8>, value: Vec<u8>) {
@@ -728,45 +749,37 @@ where
{
fn limit_remove_from_backend(
&mut self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
limit: Option<u32>,
) -> (bool, u32) {
let mut num_deleted: u32 = 0;
if let Some(limit) = limit {
let mut all_deleted = true;
self.backend.apply_to_keys_while(child_info, prefix, |key| {
if num_deleted == limit {
all_deleted = false;
maybe_child: Option<&ChildInfo>,
maybe_prefix: Option<&[u8]>,
maybe_limit: Option<u32>,
maybe_cursor: Option<&[u8]>,
) -> (Option<Vec<u8>>, u32, u32) {
let mut delete_count: u32 = 0;
let mut loop_count: u32 = 0;
let mut maybe_next_key = None;
self.backend
.apply_to_keys_while(maybe_child, maybe_prefix, maybe_cursor, |key| {
if maybe_limit.map_or(false, |limit| loop_count == limit) {
maybe_next_key = Some(key.to_vec());
return false
}
if let Some(num) = num_deleted.checked_add(1) {
num_deleted = num;
} else {
all_deleted = false;
return false
}
if let Some(child_info) = child_info {
self.overlay.set_child_storage(child_info, key.to_vec(), None);
} else {
self.overlay.set_storage(key.to_vec(), None);
let overlay = match maybe_child {
Some(child_info) => self.overlay.child_storage(child_info, key),
None => self.overlay.storage(key),
};
if !matches!(overlay, Some(None)) {
// not pending deletion from the backend - delete it.
if let Some(child_info) = maybe_child {
self.overlay.set_child_storage(child_info, key.to_vec(), None);
} else {
self.overlay.set_storage(key.to_vec(), None);
}
delete_count = delete_count.saturating_add(1);
}
loop_count = loop_count.saturating_add(1);
true
});
(all_deleted, num_deleted)
} else {
self.backend.apply_to_keys_while(child_info, prefix, |key| {
num_deleted = num_deleted.saturating_add(1);
if let Some(child_info) = child_info {
self.overlay.set_child_storage(child_info, key.to_vec(), None);
} else {
self.overlay.set_storage(key.to_vec(), None);
}
true
});
(true, num_deleted)
}
(maybe_next_key, delete_count, loop_count)
}
}
@@ -1085,14 +1098,14 @@ mod tests {
not_under_prefix.extend(b"path");
ext.set_storage(not_under_prefix.clone(), vec![10]);
ext.clear_prefix(&[], None);
ext.clear_prefix(&well_known_keys::CHILD_STORAGE_KEY_PREFIX[..4], None);
let _ = ext.clear_prefix(&[], None, None);
let _ = ext.clear_prefix(&well_known_keys::CHILD_STORAGE_KEY_PREFIX[..4], None, None);
let mut under_prefix = well_known_keys::CHILD_STORAGE_KEY_PREFIX.to_vec();
under_prefix.extend(b"path");
ext.clear_prefix(&well_known_keys::CHILD_STORAGE_KEY_PREFIX[..4], None);
let _ = ext.clear_prefix(&well_known_keys::CHILD_STORAGE_KEY_PREFIX[..4], None, None);
assert_eq!(ext.child_storage(child_info, &[30]), Some(vec![40]));
assert_eq!(ext.storage(not_under_prefix.as_slice()), Some(vec![10]));
ext.clear_prefix(&not_under_prefix[..5], None);
let _ = ext.clear_prefix(&not_under_prefix[..5], None, None);
assert_eq!(ext.storage(not_under_prefix.as_slice()), None);
}
+40 -12
View File
@@ -1348,6 +1348,7 @@ mod execution {
mod tests {
use super::{ext::Ext, *};
use crate::{execution::CallResult, in_memory_backend::new_in_mem_hash_key};
use assert_matches::assert_matches;
use codec::{Decode, Encode};
use sp_core::{
map,
@@ -1572,7 +1573,7 @@ mod tests {
{
let mut cache = StorageTransactionCache::default();
let mut ext = Ext::new(&mut overlay, &mut cache, backend, None);
ext.clear_prefix(b"ab", None);
let _ = ext.clear_prefix(b"ab", None, None);
}
overlay.commit_transaction().unwrap();
@@ -1596,7 +1597,10 @@ mod tests {
{
let mut cache = StorageTransactionCache::default();
let mut ext = Ext::new(&mut overlay, &mut cache, backend, None);
assert_eq!((false, 1), ext.clear_prefix(b"ab", Some(1)));
assert_matches!(
ext.clear_prefix(b"ab", Some(1), None).deconstruct(),
(Some(_), 1, 3, 1)
);
}
overlay.commit_transaction().unwrap();
@@ -1638,7 +1642,8 @@ mod tests {
{
let mut cache = StorageTransactionCache::default();
let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None);
assert_eq!(ext.kill_child_storage(&child_info, Some(2)), (false, 2));
let r = ext.kill_child_storage(&child_info, Some(2), None);
assert_matches!(r.deconstruct(), (Some(_), 2, 6, 2));
}
assert_eq!(
@@ -1673,14 +1678,37 @@ mod tests {
let mut overlay = OverlayedChanges::default();
let mut cache = StorageTransactionCache::default();
let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None);
assert_eq!(ext.kill_child_storage(&child_info, Some(0)), (false, 0));
assert_eq!(ext.kill_child_storage(&child_info, Some(1)), (false, 1));
assert_eq!(ext.kill_child_storage(&child_info, Some(2)), (false, 2));
assert_eq!(ext.kill_child_storage(&child_info, Some(3)), (false, 3));
assert_eq!(ext.kill_child_storage(&child_info, Some(4)), (true, 4));
// Only 4 items to remove
assert_eq!(ext.kill_child_storage(&child_info, Some(5)), (true, 4));
assert_eq!(ext.kill_child_storage(&child_info, None), (true, 4));
let r = ext.kill_child_storage(&child_info, Some(0), None).deconstruct();
assert_matches!(r, (Some(_), 0, 0, 0));
let r = ext
.kill_child_storage(&child_info, Some(1), r.0.as_ref().map(|x| &x[..]))
.deconstruct();
assert_matches!(r, (Some(_), 1, 1, 1));
let r = ext
.kill_child_storage(&child_info, Some(4), r.0.as_ref().map(|x| &x[..]))
.deconstruct();
// Only 3 items remaining to remove
assert_matches!(r, (None, 3, 3, 3));
let r = ext.kill_child_storage(&child_info, Some(1), None).deconstruct();
assert_matches!(r, (Some(_), 0, 0, 1));
}
#[test]
fn limited_child_kill_off_by_one_works_without_limit() {
let child_info = ChildInfo::new_default(b"sub1");
let initial: HashMap<_, BTreeMap<_, _>> = map![
Some(child_info.clone()) => map![
b"a".to_vec() => b"0".to_vec(),
b"b".to_vec() => b"1".to_vec(),
b"c".to_vec() => b"2".to_vec(),
b"d".to_vec() => b"3".to_vec()
],
];
let backend = InMemoryBackend::<BlakeTwo256>::from((initial, StateVersion::default()));
let mut overlay = OverlayedChanges::default();
let mut cache = StorageTransactionCache::default();
let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None);
assert_eq!(ext.kill_child_storage(&child_info, None, None).deconstruct(), (None, 4, 4, 4));
}
#[test]
@@ -1695,7 +1723,7 @@ mod tests {
ext.set_child_storage(child_info, b"abc".to_vec(), b"def".to_vec());
assert_eq!(ext.child_storage(child_info, b"abc"), Some(b"def".to_vec()));
ext.kill_child_storage(child_info, None);
let _ = ext.kill_child_storage(child_info, None, None);
assert_eq!(ext.child_storage(child_info, b"abc"), None);
}
@@ -410,10 +410,15 @@ impl OverlayedChangeSet {
&mut self,
predicate: impl Fn(&[u8], &OverlayedValue) -> bool,
at_extrinsic: Option<u32>,
) {
) -> u32 {
let mut count = 0;
for (key, val) in self.changes.iter_mut().filter(|(k, v)| predicate(k, v)) {
if val.value_ref().is_some() {
count += 1;
}
val.set(None, insert_dirty(&mut self.dirty_keys, key.clone()), at_extrinsic);
}
count
}
/// Get the iterator over all changes that follow the supplied `key`.
@@ -299,7 +299,7 @@ impl OverlayedChanges {
/// Clear child storage of given storage key.
///
/// Can be rolled back or committed when called inside a transaction.
pub(crate) fn clear_child_storage(&mut self, child_info: &ChildInfo) {
pub(crate) fn clear_child_storage(&mut self, child_info: &ChildInfo) -> u32 {
let extrinsic_index = self.extrinsic_index();
let storage_key = child_info.storage_key().to_vec();
let top = &self.top;
@@ -309,20 +309,20 @@ impl OverlayedChanges {
.or_insert_with(|| (top.spawn_child(), child_info.clone()));
let updatable = info.try_update(child_info);
debug_assert!(updatable);
changeset.clear_where(|_, _| true, extrinsic_index);
changeset.clear_where(|_, _| true, extrinsic_index)
}
/// Removes all key-value pairs which keys share the given prefix.
///
/// Can be rolled back or committed when called inside a transaction.
pub(crate) fn clear_prefix(&mut self, prefix: &[u8]) {
self.top.clear_where(|key, _| key.starts_with(prefix), self.extrinsic_index());
pub(crate) fn clear_prefix(&mut self, prefix: &[u8]) -> u32 {
self.top.clear_where(|key, _| key.starts_with(prefix), self.extrinsic_index())
}
/// Removes all key-value pairs which keys share the given prefix.
///
/// Can be rolled back or committed when called inside a transaction
pub(crate) fn clear_child_prefix(&mut self, child_info: &ChildInfo, prefix: &[u8]) {
pub(crate) fn clear_child_prefix(&mut self, child_info: &ChildInfo, prefix: &[u8]) -> u32 {
let extrinsic_index = self.extrinsic_index();
let storage_key = child_info.storage_key().to_vec();
let top = &self.top;
@@ -332,7 +332,7 @@ impl OverlayedChanges {
.or_insert_with(|| (top.spawn_child(), child_info.clone()));
let updatable = info.try_update(child_info);
debug_assert!(updatable);
changeset.clear_where(|key, _| key.starts_with(prefix), extrinsic_index);
changeset.clear_where(|key, _| key.starts_with(prefix), extrinsic_index)
}
/// Returns the current nesting depth of the transaction stack.
@@ -288,9 +288,10 @@ where
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
) {
self.0.apply_to_keys_while(child_info, prefix, f)
self.0.apply_to_keys_while(child_info, prefix, start_at, f)
}
fn next_storage_key(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
@@ -25,6 +25,7 @@ use sp_core::{
traits::Externalities,
Blake2Hasher,
};
use sp_externalities::MultiRemovalResults;
use std::{
any::{Any, TypeId},
marker::PhantomData,
@@ -124,11 +125,21 @@ impl<'a, H: Hasher, B: 'a + Backend<H>> Externalities for ReadOnlyExternalities<
unimplemented!("place_child_storage not supported in ReadOnlyExternalities")
}
fn kill_child_storage(&mut self, _child_info: &ChildInfo, _limit: Option<u32>) -> (bool, u32) {
fn kill_child_storage(
&mut self,
_child_info: &ChildInfo,
_maybe_limit: Option<u32>,
_maybe_cursor: Option<&[u8]>,
) -> MultiRemovalResults {
unimplemented!("kill_child_storage is not supported in ReadOnlyExternalities")
}
fn clear_prefix(&mut self, _prefix: &[u8], _limit: Option<u32>) -> (bool, u32) {
fn clear_prefix(
&mut self,
_prefix: &[u8],
_maybe_limit: Option<u32>,
_maybe_cursor: Option<&[u8]>,
) -> MultiRemovalResults {
unimplemented!("clear_prefix is not supported in ReadOnlyExternalities")
}
@@ -136,8 +147,9 @@ impl<'a, H: Hasher, B: 'a + Backend<H>> Externalities for ReadOnlyExternalities<
&mut self,
_child_info: &ChildInfo,
_prefix: &[u8],
_limit: Option<u32>,
) -> (bool, u32) {
_maybe_limit: Option<u32>,
_maybe_cursor: Option<&[u8]>,
) -> MultiRemovalResults {
unimplemented!("clear_child_prefix is not supported in ReadOnlyExternalities")
}
@@ -381,7 +381,10 @@ mod tests {
{
let mut ext = ext.ext();
assert!(!ext.kill_child_storage(&child_info, Some(2)).0, "Should not delete all keys");
assert!(
ext.kill_child_storage(&child_info, Some(2), None).maybe_cursor.is_some(),
"Should not delete all keys"
);
assert!(ext.child_storage(&child_info, &b"doe"[..]).is_none());
assert!(ext.child_storage(&child_info, &b"dog"[..]).is_none());
@@ -123,9 +123,10 @@ where
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
) {
self.essence.apply_to_keys_while(child_info, prefix, f)
self.essence.apply_to_keys_while(child_info, prefix, start_at, f)
}
fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
@@ -267,7 +267,8 @@ where
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
mut f: F,
start_at: Option<&[u8]>,
f: F,
) {
let root = if let Some(child_info) = child_info.as_ref() {
match self.child_root(child_info) {
@@ -283,7 +284,7 @@ where
self.root
};
self.trie_iter_key_inner(&root, prefix, |k| f(k), child_info)
self.trie_iter_key_inner(&root, prefix, f, child_info, start_at)
}
/// Execute given closure for all keys starting with prefix.
@@ -311,6 +312,7 @@ where
true
},
Some(child_info),
None,
)
}
@@ -324,28 +326,31 @@ where
true
},
None,
None,
)
}
fn trie_iter_key_inner<F: FnMut(&[u8]) -> bool>(
&self,
root: &H::Out,
prefix: Option<&[u8]>,
maybe_prefix: Option<&[u8]>,
mut f: F,
child_info: Option<&ChildInfo>,
maybe_start_at: Option<&[u8]>,
) {
let mut iter = move |db| -> sp_std::result::Result<(), Box<TrieError<H::Out>>> {
let trie = TrieDB::<H>::new(db, root)?;
let iter = if let Some(prefix) = prefix.as_ref() {
TrieDBKeyIterator::new_prefixed(&trie, prefix)?
} else {
TrieDBKeyIterator::new(&trie)?
};
let prefix = maybe_prefix.unwrap_or(&[]);
let iter = match maybe_start_at {
Some(start_at) =>
TrieDBKeyIterator::new_prefixed_then_seek(&trie, prefix, start_at),
None => TrieDBKeyIterator::new_prefixed(&trie, prefix),
}?;
for x in iter {
let key = x?;
debug_assert!(prefix
debug_assert!(maybe_prefix
.as_ref()
.map(|prefix| key.starts_with(prefix))
.unwrap_or(true));
@@ -22,7 +22,7 @@ use sp_core::{
storage::{ChildInfo, StateVersion, TrackedStorageKey},
traits::{Externalities, RuntimeSpawn, RuntimeSpawnExt, SpawnNamed, TaskExecutorExt},
};
use sp_externalities::{Extensions, ExternalitiesExt as _};
use sp_externalities::{Extensions, ExternalitiesExt as _, MultiRemovalResults};
use std::any::{Any, TypeId};
/// Simple state-less externalities for use in async context.
@@ -105,11 +105,21 @@ impl Externalities for AsyncExternalities {
panic!("`place_child_storage`: should not be used in async externalities!")
}
fn kill_child_storage(&mut self, _child_info: &ChildInfo, _limit: Option<u32>) -> (bool, u32) {
fn kill_child_storage(
&mut self,
_child_info: &ChildInfo,
_maybe_limit: Option<u32>,
_maybe_cursor: Option<&[u8]>,
) -> MultiRemovalResults {
panic!("`kill_child_storage`: should not be used in async externalities!")
}
fn clear_prefix(&mut self, _prefix: &[u8], _limit: Option<u32>) -> (bool, u32) {
fn clear_prefix(
&mut self,
_prefix: &[u8],
_maybe_limit: Option<u32>,
_maybe_cursor: Option<&[u8]>,
) -> MultiRemovalResults {
panic!("`clear_prefix`: should not be used in async externalities!")
}
@@ -117,8 +127,9 @@ impl Externalities for AsyncExternalities {
&mut self,
_child_info: &ChildInfo,
_prefix: &[u8],
_limit: Option<u32>,
) -> (bool, u32) {
_maybe_limit: Option<u32>,
_maybe_cursor: Option<&[u8]>,
) -> MultiRemovalResults {
panic!("`clear_child_prefix`: should not be used in async externalities!")
}