Safe and sane multi-item storage removal (#11490)

* Fix overlay prefix removal result

* Second part of the overlay prefix removal fix.

* Report only items deleted from storage in clear_prefix

* Fix kill_prefix

* Formatting

* Remove unused code

* Fixes

* Fixes

* Introduce clear_prefix host function v3

* Formatting

* Use v2 for now

* Fixes

* Formatting

* Docs

* Child prefix removal should also hide v3 for now

* Fixes

* Fixes

* Formatting

* Fixes

* apply_to_keys_whle takes start_at

* apply_to_keys_whle takes start_at

* apply_to_keys_whle takes start_at

* Cursor API; force limits

* Use unsafe deprecated functions

* Formatting

* Fixes

* Grumbles

* Fixes

* Docs

* Some nitpicks 🙈

* Update primitives/externalities/src/lib.rs

Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>

* Formatting

* Fixes

* cargo fmt

* Fixes

* Update primitives/io/src/lib.rs

Co-authored-by: Keith Yeung <kungfukeith11@gmail.com>

* Formatting

* Fixes

Co-authored-by: Bastian Köcher <info@kchr.de>
Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>
Co-authored-by: Keith Yeung <kungfukeith11@gmail.com>
This commit is contained in:
Gavin Wood
2022-05-29 12:56:26 +01:00
committed by GitHub
parent 189a310e4c
commit ecbd65fb95
45 changed files with 968 additions and 206 deletions
+54 -1
View File
@@ -21,7 +21,7 @@
// NOTE: could replace unhashed by having only one kind of storage (top trie being the child info
// of null length parent storage key).
pub use crate::sp_io::KillStorageResult;
pub use crate::sp_io::{KillStorageResult, MultiRemovalResults};
use crate::sp_std::prelude::*;
use codec::{Codec, Decode, Encode};
pub use sp_core::storage::{ChildInfo, ChildType, StateVersion};
@@ -136,6 +136,7 @@ pub fn exists(child_info: &ChildInfo, key: &[u8]) -> bool {
/// not make much sense because it is not cumulative when called inside the same block.
/// Use this function to distribute the deletion of a single child trie across multiple
/// blocks.
#[deprecated = "Use `clear_storage` instead"]
pub fn kill_storage(child_info: &ChildInfo, limit: Option<u32>) -> KillStorageResult {
match child_info.child_type() {
ChildType::ParentKeyId =>
@@ -143,6 +144,58 @@ pub fn kill_storage(child_info: &ChildInfo, limit: Option<u32>) -> KillStorageRe
}
}
/// Partially clear the child storage of each key-value pair.
///
/// # Limit
///
/// A *limit* should always be provided through `maybe_limit`. This is one fewer than the
/// maximum number of backend iterations which may be done by this operation and as such
/// represents the maximum number of backend deletions which may happen. A *limit* of zero
/// implies that no keys will be deleted, though there may be a single iteration done.
///
/// The limit can be used to partially delete storage items in case it is too large or costly
/// to delete all in a single operation.
///
/// # Cursor
///
/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be
/// passed once (in the initial call) for any attempt to clear storage. In general, subsequent calls
/// operating on the same prefix should pass `Some` and this value should be equal to the
/// previous call result's `maybe_cursor` field. The only exception to this is when you can
/// guarantee that the subsequent call is in a new block; in this case the previous call's result
/// cursor need not be passed in and a `None` may be passed instead. This exception may be useful
/// when making this call solely from a block-hook such as `on_initialize`.
///
/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once the
/// resultant `maybe_cursor` field is `None`, then no further items remain to be deleted.
///
/// NOTE: After the initial call for any given child storage, it is important that no further
/// keys are inserted. If so, then they may or may not be deleted by subsequent calls.
///
/// # Note
///
/// Please note that keys which are residing in the overlay for the child are deleted without
/// counting towards the `limit`.
pub fn clear_storage(
child_info: &ChildInfo,
maybe_limit: Option<u32>,
_maybe_cursor: Option<&[u8]>,
) -> MultiRemovalResults {
// TODO: Once the network has upgraded to include the new host functions, this code can be
// enabled.
// sp_io::default_child_storage::storage_kill(prefix, maybe_limit, maybe_cursor)
let r = match child_info.child_type() {
ChildType::ParentKeyId =>
sp_io::default_child_storage::storage_kill(child_info.storage_key(), maybe_limit),
};
use sp_io::KillStorageResult::*;
let (maybe_cursor, backend) = match r {
AllRemoved(db) => (None, db),
SomeRemaining(db) => (Some(child_info.storage_key().to_vec()), db),
};
MultiRemovalResults { maybe_cursor, backend, unique: backend, loops: backend }
}
/// Ensure `key` has no explicit entry in storage.
pub fn kill(child_info: &ChildInfo, key: &[u8]) {
match child_info.child_type() {
@@ -202,11 +202,28 @@ where
unhashed::kill(&Self::storage_double_map_final_key(k1, k2))
}
fn remove_prefix<KArg1>(k1: KArg1, limit: Option<u32>) -> sp_io::KillStorageResult
fn remove_prefix<KArg1>(k1: KArg1, maybe_limit: Option<u32>) -> sp_io::KillStorageResult
where
KArg1: EncodeLike<K1>,
{
unhashed::kill_prefix(Self::storage_double_map_final_key1(k1).as_ref(), limit)
unhashed::clear_prefix(Self::storage_double_map_final_key1(k1).as_ref(), maybe_limit, None)
.into()
}
fn clear_prefix<KArg1>(
k1: KArg1,
limit: u32,
maybe_cursor: Option<&[u8]>,
) -> sp_io::MultiRemovalResults
where
KArg1: EncodeLike<K1>,
{
unhashed::clear_prefix(
Self::storage_double_map_final_key1(k1).as_ref(),
Some(limit),
maybe_cursor,
)
.into()
}
fn iter_prefix_values<KArg1>(k1: KArg1) -> storage::PrefixIterator<V>
@@ -183,7 +183,22 @@ where
where
K: HasKeyPrefix<KP>,
{
unhashed::kill_prefix(&Self::storage_n_map_partial_key(partial_key), limit)
unhashed::clear_prefix(&Self::storage_n_map_partial_key(partial_key), limit, None).into()
}
fn clear_prefix<KP>(
partial_key: KP,
limit: u32,
maybe_cursor: Option<&[u8]>,
) -> sp_io::MultiRemovalResults
where
K: HasKeyPrefix<KP>,
{
unhashed::clear_prefix(
&Self::storage_n_map_partial_key(partial_key),
Some(limit),
maybe_cursor,
)
}
fn iter_prefix_values<KP>(partial_key: KP) -> PrefixIterator<V>
@@ -256,12 +256,43 @@ pub fn put_storage_value<T: Encode>(module: &[u8], item: &[u8], hash: &[u8], val
/// Remove all items under a storage prefix by the `module`, the map's `item` name and the key
/// `hash`.
#[deprecated = "Use `clear_storage_prefix` instead"]
pub fn remove_storage_prefix(module: &[u8], item: &[u8], hash: &[u8]) {
let mut key = vec![0u8; 32 + hash.len()];
let storage_prefix = storage_prefix(module, item);
key[0..32].copy_from_slice(&storage_prefix);
key[32..].copy_from_slice(hash);
frame_support::storage::unhashed::kill_prefix(&key, None);
let _ = frame_support::storage::unhashed::clear_prefix(&key, None, None);
}
/// Attempt to remove all values under a storage prefix by the `module`, the map's `item` name and
/// the key `hash`.
///
/// All values in the client overlay will be deleted, if `maybe_limit` is `Some` then up to
/// that number of values are deleted from the client backend by seeking and reading that number of
/// storage values plus one. If `maybe_limit` is `None` then all values in the client backend are
/// deleted. This is potentially unsafe since it's an unbounded operation.
///
/// ## Cursors
///
/// The `maybe_cursor` parameter should be `None` for the first call to initiate the removal.
/// If the resultant `maybe_cursor` is `Some`, then another call is required to complete the
/// removal operation. This value must be passed in as the subsequent call's `maybe_cursor`
/// parameter. If the resultant `maybe_cursor` is `None`, then the operation is complete and no
/// items remain in storage provided that no items were added between the first calls and the
/// final call.
pub fn clear_storage_prefix(
module: &[u8],
item: &[u8],
hash: &[u8],
maybe_limit: Option<u32>,
maybe_cursor: Option<&[u8]>,
) -> sp_io::MultiRemovalResults {
let mut key = vec![0u8; 32 + hash.len()];
let storage_prefix = storage_prefix(module, item);
key[0..32].copy_from_slice(&storage_prefix);
key[32..].copy_from_slice(hash);
frame_support::storage::unhashed::clear_prefix(&key, maybe_limit, maybe_cursor)
}
/// Take a particular item in storage by the `module`, the map's `item` name and the key `hash`.
+88 -4
View File
@@ -516,10 +516,34 @@ pub trait StorageDoubleMap<K1: FullEncode, K2: FullEncode, V: FullCodec> {
/// Calling this multiple times per block with a `limit` set leads always to the same keys being
/// removed and the same result being returned. This happens because the keys to delete in the
/// overlay are not taken into account when deleting keys in the backend.
#[deprecated = "Use `clear_prefix` instead"]
fn remove_prefix<KArg1>(k1: KArg1, limit: Option<u32>) -> sp_io::KillStorageResult
where
KArg1: ?Sized + EncodeLike<K1>;
/// Remove all values under the first key `k1` in the overlay and up to `maybe_limit` in the
/// backend.
///
/// All values in the client overlay will be deleted, if `maybe_limit` is `Some` then up to
/// that number of values are deleted from the client backend, otherwise all values in the
/// client backend are deleted.
///
/// ## Cursors
///
/// The `maybe_cursor` parameter should be `None` for the first call to initiate the removal.
/// If the resultant `maybe_cursor` is `Some`, then another call is required to complete the
/// removal operation. This value must be passed in as the subsequent call's `maybe_cursor`
/// parameter. If the resultant `maybe_cursor` is `None`, then the operation is complete and no
/// items remain in storage provided that no items were added between the first calls and the
/// final call.
fn clear_prefix<KArg1>(
k1: KArg1,
limit: u32,
maybe_cursor: Option<&[u8]>,
) -> sp_io::MultiRemovalResults
where
KArg1: ?Sized + EncodeLike<K1>;
/// Iterate over values that share the first key.
fn iter_prefix_values<KArg1>(k1: KArg1) -> PrefixIterator<V>
where
@@ -657,10 +681,42 @@ pub trait StorageNMap<K: KeyGenerator, V: FullCodec> {
/// Calling this multiple times per block with a `limit` set leads always to the same keys being
/// removed and the same result being returned. This happens because the keys to delete in the
/// overlay are not taken into account when deleting keys in the backend.
#[deprecated = "Use `clear_prefix` instead"]
fn remove_prefix<KP>(partial_key: KP, limit: Option<u32>) -> sp_io::KillStorageResult
where
K: HasKeyPrefix<KP>;
/// Attempt to remove items from the map matching a `partial_key` prefix.
///
/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once
/// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted.
///
/// NOTE: After the initial call for any given map, it is important that no further items
/// are inserted into the map which match the `partial key`. If so, then the map may not be
/// empty when the resultant `maybe_cursor` is `None`.
///
/// # Limit
///
/// A `limit` must be provided in order to cap the maximum
/// amount of deletions done in a single call. This is one fewer than the
/// maximum number of backend iterations which may be done by this operation and as such
/// represents the maximum number of backend deletions which may happen. A `limit` of zero
/// implies that no keys will be deleted, though there may be a single iteration done.
///
/// # Cursor
///
/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be
/// passed once (in the initial call) for any given storage map and `partial_key`. Subsequent
/// calls operating on the same map/`partial_key` should always pass `Some`, and this should be
/// equal to the previous call result's `maybe_cursor` field.
fn clear_prefix<KP>(
partial_key: KP,
limit: u32,
maybe_cursor: Option<&[u8]>,
) -> sp_io::MultiRemovalResults
where
K: HasKeyPrefix<KP>;
/// Iterate over values that share the partial prefix key.
fn iter_prefix_values<KP>(partial_key: KP) -> PrefixIterator<V>
where
@@ -1111,8 +1167,36 @@ pub trait StoragePrefixedMap<Value: FullCodec> {
/// Calling this multiple times per block with a `limit` set leads always to the same keys being
/// removed and the same result being returned. This happens because the keys to delete in the
/// overlay are not taken into account when deleting keys in the backend.
#[deprecated = "Use `clear` instead"]
fn remove_all(limit: Option<u32>) -> sp_io::KillStorageResult {
sp_io::storage::clear_prefix(&Self::final_prefix(), limit)
unhashed::clear_prefix(&Self::final_prefix(), limit, None).into()
}
/// Attempt to remove all items from the map.
///
/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once
/// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted.
///
/// NOTE: After the initial call for any given map, it is important that no further items
/// are inserted into the map. If so, then the map may not be empty when the resultant
/// `maybe_cursor` is `None`.
///
/// # Limit
///
/// A `limit` must always be provided in order to cap the maximum
/// amount of deletions done in a single call. This is one fewer than the
/// maximum number of backend iterations which may be done by this operation and as such
/// represents the maximum number of backend deletions which may happen. A `limit` of zero
/// implies that no keys will be deleted, though there may be a single iteration done.
///
/// # Cursor
///
/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be
/// passed once (in the initial call) for any given storage map. Subsequent calls
/// operating on the same map should always pass `Some`, and this should be equal to the
/// previous call result's `maybe_cursor` field.
fn clear(limit: u32, maybe_cursor: Option<&[u8]>) -> sp_io::MultiRemovalResults {
unhashed::clear_prefix(&Self::final_prefix(), Some(limit), maybe_cursor)
}
/// Iter over all value of the storage.
@@ -1427,7 +1511,7 @@ mod test {
assert_eq!(MyStorage::iter_values().collect::<Vec<_>>(), vec![1, 2, 3, 4]);
// test removal
MyStorage::remove_all(None);
let _ = MyStorage::clear(u32::max_value(), None);
assert!(MyStorage::iter_values().collect::<Vec<_>>().is_empty());
// test migration
@@ -1437,7 +1521,7 @@ mod test {
assert!(MyStorage::iter_values().collect::<Vec<_>>().is_empty());
MyStorage::translate_values(|v: u32| Some(v as u64));
assert_eq!(MyStorage::iter_values().collect::<Vec<_>>(), vec![1, 2]);
MyStorage::remove_all(None);
let _ = MyStorage::clear(u32::max_value(), None);
// test migration 2
unhashed::put(&[&k[..], &vec![1][..]].concat(), &1u128);
@@ -1449,7 +1533,7 @@ mod test {
assert_eq!(MyStorage::iter_values().collect::<Vec<_>>(), vec![1, 2, 3]);
MyStorage::translate_values(|v: u128| Some(v as u64));
assert_eq!(MyStorage::iter_values().collect::<Vec<_>>(), vec![1, 2, 3]);
MyStorage::remove_all(None);
let _ = MyStorage::clear(u32::max_value(), None);
// test that other values are not modified.
assert_eq!(unhashed::get(&key_before[..]), Some(32u64));
@@ -31,6 +31,7 @@ use crate::{
Never,
};
use codec::{Decode, Encode, EncodeLike, FullCodec, MaxEncodedLen, Ref};
use sp_io::MultiRemovalResults;
use sp_runtime::traits::Saturating;
use sp_std::prelude::*;
@@ -273,13 +274,44 @@ where
<Self as MapWrapper>::Map::migrate_key::<OldHasher, _>(key)
}
/// Remove all value of the storage.
/// Remove all values in the map.
#[deprecated = "Use `clear` instead"]
pub fn remove_all() {
// NOTE: it is not possible to remove up to some limit because
// `sp_io::storage::clear_prefix` and `StorageMap::remove_all` don't give the number of
// values removed from the overlay.
CounterFor::<Prefix>::set(0u32);
#[allow(deprecated)]
<Self as MapWrapper>::Map::remove_all(None);
CounterFor::<Prefix>::kill();
}
/// Attempt to remove all items from the map.
///
/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once
/// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted.
///
/// NOTE: After the initial call for any given map, it is important that no further items
/// are inserted into the map. If so, then the map may not be empty when the resultant
/// `maybe_cursor` is `None`.
///
/// # Limit
///
/// A `limit` must always be provided in order to cap the maximum
/// amount of deletions done in a single call. This is one fewer than the
/// maximum number of backend iterations which may be done by this operation and as such
/// represents the maximum number of backend deletions which may happen. A `limit` of zero
/// implies that no keys will be deleted, though there may be a single iteration done.
///
/// # Cursor
///
/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be
/// passed once (in the initial call) for any given storage map. Subsequent calls
/// operating on the same map should always pass `Some`, and this should be equal to the
/// previous call result's `maybe_cursor` field.
pub fn clear(limit: u32, maybe_cursor: Option<&[u8]>) -> MultiRemovalResults {
let result = <Self as MapWrapper>::Map::clear(limit, maybe_cursor);
match result.maybe_cursor {
None => CounterFor::<Prefix>::kill(),
Some(_) => CounterFor::<Prefix>::mutate(|x| x.saturating_reduce(result.unique)),
}
result
}
/// Iter over all value of the storage.
@@ -691,7 +723,7 @@ mod test {
assert_eq!(A::count(), 2);
// Remove all.
A::remove_all();
let _ = A::clear(u32::max_value(), None);
assert_eq!(A::count(), 0);
assert_eq!(A::initialize_counter(), 0);
@@ -922,7 +954,7 @@ mod test {
assert_eq!(B::count(), 2);
// Remove all.
B::remove_all();
let _ = B::clear(u32::max_value(), None);
assert_eq!(B::count(), 0);
assert_eq!(B::initialize_counter(), 0);
@@ -229,13 +229,53 @@ where
/// Calling this multiple times per block with a `limit` set leads always to the same keys being
/// removed and the same result being returned. This happens because the keys to delete in the
/// overlay are not taken into account when deleting keys in the backend.
#[deprecated = "Use `clear_prefix` instead"]
pub fn remove_prefix<KArg1>(k1: KArg1, limit: Option<u32>) -> sp_io::KillStorageResult
where
KArg1: ?Sized + EncodeLike<Key1>,
{
#[allow(deprecated)]
<Self as crate::storage::StorageDoubleMap<Key1, Key2, Value>>::remove_prefix(k1, limit)
}
/// Attempt to remove items from the map matching a `first_key` prefix.
///
/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once
/// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted.
///
/// NOTE: After the initial call for any given map, it is important that no further items
/// are inserted into the map which match the `first_key`. If so, then the map may not be
/// empty when the resultant `maybe_cursor` is `None`.
///
/// # Limit
///
/// A `limit` must always be provided in order to cap the maximum
/// amount of deletions done in a single call. This is one fewer than the
/// maximum number of backend iterations which may be done by this operation and as such
/// represents the maximum number of backend deletions which may happen. A `limit` of zero
/// implies that no keys will be deleted, though there may be a single iteration done.
///
/// # Cursor
///
/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be
/// passed once (in the initial call) for any given storage map and `first_key`. Subsequent
/// calls operating on the same map/`first_key` should always pass `Some`, and this should be
/// equal to the previous call result's `maybe_cursor` field.
pub fn clear_prefix<KArg1>(
first_key: KArg1,
limit: u32,
maybe_cursor: Option<&[u8]>,
) -> sp_io::MultiRemovalResults
where
KArg1: ?Sized + EncodeLike<Key1>,
{
<Self as crate::storage::StorageDoubleMap<Key1, Key2, Value>>::clear_prefix(
first_key,
limit,
maybe_cursor,
)
}
/// Iterate over values that share the first key.
pub fn iter_prefix_values<KArg1>(k1: KArg1) -> crate::storage::PrefixIterator<Value>
where
@@ -359,10 +399,39 @@ where
/// Calling this multiple times per block with a `limit` set leads always to the same keys being
/// removed and the same result being returned. This happens because the keys to delete in the
/// overlay are not taken into account when deleting keys in the backend.
#[deprecated = "Use `clear` instead"]
pub fn remove_all(limit: Option<u32>) -> sp_io::KillStorageResult {
#[allow(deprecated)]
<Self as crate::storage::StoragePrefixedMap<Value>>::remove_all(limit)
}
/// Attempt to remove all items from the map.
///
/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once
/// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted.
///
/// NOTE: After the initial call for any given map, it is important that no further items
/// are inserted into the map. If so, then the map may not be empty when the resultant
/// `maybe_cursor` is `None`.
///
/// # Limit
///
/// A `limit` must always be provided in order to cap the maximum
/// amount of deletions done in a single call. This is one fewer than the
/// maximum number of backend iterations which may be done by this operation and as such
/// represents the maximum number of backend deletions which may happen. A `limit` of zero
/// implies that no keys will be deleted, though there may be a single iteration done.
///
/// # Cursor
///
/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be
/// passed once (in the initial call) for any given storage map. Subsequent calls
/// operating on the same map should always pass `Some`, and this should be equal to the
/// previous call result's `maybe_cursor` field.
pub fn clear(limit: u32, maybe_cursor: Option<&[u8]>) -> sp_io::MultiRemovalResults {
<Self as crate::storage::StoragePrefixedMap<Value>>::clear(limit, maybe_cursor)
}
/// Iter over all value of the storage.
///
/// NOTE: If a value failed to decode because storage is corrupted then it is skipped.
@@ -768,7 +837,7 @@ mod test {
A::insert(3, 30, 10);
A::insert(4, 40, 10);
A::remove_all(None);
let _ = A::clear(u32::max_value(), None);
assert_eq!(A::contains_key(3, 30), false);
assert_eq!(A::contains_key(4, 40), false);
@@ -829,7 +898,7 @@ mod test {
]
);
WithLen::remove_all(None);
let _ = WithLen::clear(u32::max_value(), None);
assert_eq!(WithLen::decode_len(3, 30), None);
WithLen::append(0, 100, 10);
assert_eq!(WithLen::decode_len(0, 100), Some(1));
@@ -843,7 +912,7 @@ mod test {
assert_eq!(A::iter_prefix_values(4).collect::<Vec<_>>(), vec![13, 14]);
assert_eq!(A::iter_prefix(4).collect::<Vec<_>>(), vec![(40, 13), (41, 14)]);
A::remove_prefix(3, None);
let _ = A::clear_prefix(3, u32::max_value(), None);
assert_eq!(A::iter_prefix(3).collect::<Vec<_>>(), vec![]);
assert_eq!(A::iter_prefix(4).collect::<Vec<_>>(), vec![(40, 13), (41, 14)]);
@@ -247,10 +247,39 @@ where
/// Calling this multiple times per block with a `limit` set leads always to the same keys being
/// removed and the same result being returned. This happens because the keys to delete in the
/// overlay are not taken into account when deleting keys in the backend.
#[deprecated = "Use `clear` instead"]
pub fn remove_all(limit: Option<u32>) -> sp_io::KillStorageResult {
#[allow(deprecated)]
<Self as crate::storage::StoragePrefixedMap<Value>>::remove_all(limit)
}
/// Attempt to remove all items from the map.
///
/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once
/// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted.
///
/// NOTE: After the initial call for any given map, it is important that no further items
/// are inserted into the map. If so, then the map may not be empty when the resultant
/// `maybe_cursor` is `None`.
///
/// # Limit
///
/// A `limit` must always be provided in order to cap the maximum
/// amount of deletions done in a single call. This is one fewer than the
/// maximum number of backend iterations which may be done by this operation and as such
/// represents the maximum number of backend deletions which may happen. A `limit` of zero
/// implies that no keys will be deleted, though there may be a single iteration done.
///
/// # Cursor
///
/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be
/// passed once (in the initial call) for any given storage map. Subsequent calls
/// operating on the same map should always pass `Some`, and this should be equal to the
/// previous call result's `maybe_cursor` field.
pub fn clear(limit: u32, maybe_cursor: Option<&[u8]>) -> sp_io::MultiRemovalResults {
<Self as crate::storage::StoragePrefixedMap<Value>>::clear(limit, maybe_cursor)
}
/// Iter over all value of the storage.
///
/// NOTE: If a value failed to decode because storage is corrupted then it is skipped.
@@ -563,7 +592,7 @@ mod test {
A::insert(3, 10);
A::insert(4, 10);
A::remove_all(None);
let _ = A::clear(u32::max_value(), None);
assert_eq!(A::contains_key(3), false);
assert_eq!(A::contains_key(4), false);
@@ -618,7 +647,7 @@ mod test {
]
);
WithLen::remove_all(None);
let _ = WithLen::clear(u32::max_value(), None);
assert_eq!(WithLen::decode_len(3), None);
WithLen::append(0, 10);
assert_eq!(WithLen::decode_len(0), Some(1));
+124 -28
View File
@@ -185,13 +185,53 @@ where
/// Calling this multiple times per block with a `limit` set leads always to the same keys being
/// removed and the same result being returned. This happens because the keys to delete in the
/// overlay are not taken into account when deleting keys in the backend.
#[deprecated = "Use `clear_prefix` instead"]
pub fn remove_prefix<KP>(partial_key: KP, limit: Option<u32>) -> sp_io::KillStorageResult
where
Key: HasKeyPrefix<KP>,
{
#[allow(deprecated)]
<Self as crate::storage::StorageNMap<Key, Value>>::remove_prefix(partial_key, limit)
}
/// Attempt to remove items from the map matching a `partial_key` prefix.
///
/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once
/// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted.
///
/// NOTE: After the initial call for any given map, it is important that no further items
/// are inserted into the map which match the `partial key`. If so, then the map may not be
/// empty when the resultant `maybe_cursor` is `None`.
///
/// # Limit
///
/// A `limit` must be provided in order to cap the maximum
/// amount of deletions done in a single call. This is one fewer than the
/// maximum number of backend iterations which may be done by this operation and as such
/// represents the maximum number of backend deletions which may happen. A `limit` of zero
/// implies that no keys will be deleted, though there may be a single iteration done.
///
/// # Cursor
///
/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be
/// passed once (in the initial call) for any given storage map and `partial_key`. Subsequent
/// calls operating on the same map/`partial_key` should always pass `Some`, and this should be
/// equal to the previous call result's `maybe_cursor` field.
pub fn clear_prefix<KP>(
partial_key: KP,
limit: u32,
maybe_cursor: Option<&[u8]>,
) -> sp_io::MultiRemovalResults
where
Key: HasKeyPrefix<KP>,
{
<Self as crate::storage::StorageNMap<Key, Value>>::clear_prefix(
partial_key,
limit,
maybe_cursor,
)
}
/// Iterate over values that share the first key.
pub fn iter_prefix_values<KP>(partial_key: KP) -> PrefixIterator<Value>
where
@@ -299,8 +339,37 @@ where
/// Calling this multiple times per block with a `limit` set leads always to the same keys being
/// removed and the same result being returned. This happens because the keys to delete in the
/// overlay are not taken into account when deleting keys in the backend.
#[deprecated = "Use `clear` instead"]
pub fn remove_all(limit: Option<u32>) -> sp_io::KillStorageResult {
<Self as crate::storage::StoragePrefixedMap<Value>>::remove_all(limit)
#[allow(deprecated)]
<Self as crate::storage::StoragePrefixedMap<Value>>::remove_all(limit).into()
}
/// Attempt to remove all items from the map.
///
/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once
/// the resultant `maybe_cursor` field is `None`, then no further items remain to be deleted.
///
/// NOTE: After the initial call for any given map, it is important that no further items
/// are inserted into the map. If so, then the map may not be empty when the resultant
/// `maybe_cursor` is `None`.
///
/// # Limit
///
/// A `limit` must always be provided in order to cap the maximum
/// amount of deletions done in a single call. This is one fewer than the
/// maximum number of backend iterations which may be done by this operation and as such
/// represents the maximum number of backend deletions which may happen. A `limit` of zero
/// implies that no keys will be deleted, though there may be a single iteration done.
///
/// # Cursor
///
/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be
/// passed once (in the initial call) for any given storage map. Subsequent calls
/// operating on the same map should always pass `Some`, and this should be equal to the
/// previous call result's `maybe_cursor` field.
pub fn clear(limit: u32, maybe_cursor: Option<&[u8]>) -> sp_io::MultiRemovalResults {
<Self as crate::storage::StoragePrefixedMap<Value>>::clear(limit, maybe_cursor)
}
/// Iter over all value of the storage.
@@ -544,7 +613,7 @@ mod test {
use crate::{
hash::{StorageHasher as _, *},
metadata::{StorageEntryModifier, StorageHasher},
storage::types::{Key, ValueQuery},
storage::types::{Key as NMapKey, ValueQuery},
};
use sp_io::{hashing::twox_128, TestExternalities};
@@ -565,12 +634,12 @@ mod test {
#[test]
fn test_1_key() {
type A = StorageNMap<Prefix, Key<Blake2_128Concat, u16>, u32, OptionQuery>;
type A = StorageNMap<Prefix, NMapKey<Blake2_128Concat, u16>, u32, OptionQuery>;
type AValueQueryWithAnOnEmpty =
StorageNMap<Prefix, Key<Blake2_128Concat, u16>, u32, ValueQuery, ADefault>;
type B = StorageNMap<Prefix, Key<Blake2_256, u16>, u32, ValueQuery>;
type C = StorageNMap<Prefix, Key<Blake2_128Concat, u16>, u8, ValueQuery>;
type WithLen = StorageNMap<Prefix, Key<Blake2_128Concat, u16>, Vec<u32>>;
StorageNMap<Prefix, NMapKey<Blake2_128Concat, u16>, u32, ValueQuery, ADefault>;
type B = StorageNMap<Prefix, NMapKey<Blake2_256, u16>, u32, ValueQuery>;
type C = StorageNMap<Prefix, NMapKey<Blake2_128Concat, u16>, u8, ValueQuery>;
type WithLen = StorageNMap<Prefix, NMapKey<Blake2_128Concat, u16>, Vec<u32>>;
TestExternalities::default().execute_with(|| {
let mut k: Vec<u8> = vec![];
@@ -590,13 +659,13 @@ mod test {
{
#[crate::storage_alias]
type Foo = StorageNMap<test, (Key<Blake2_128Concat, u16>), u32>;
type Foo = StorageNMap<test, (NMapKey<Blake2_128Concat, u16>), u32>;
assert_eq!(Foo::contains_key((3,)), true);
assert_eq!(Foo::get((3,)), Some(10));
}
A::swap::<Key<Blake2_128Concat, u16>, _, _>((3,), (2,));
A::swap::<NMapKey<Blake2_128Concat, u16>, _, _>((3,), (2,));
assert_eq!(A::contains_key((3,)), false);
assert_eq!(A::contains_key((2,)), true);
assert_eq!(A::get((3,)), None);
@@ -684,7 +753,7 @@ mod test {
A::insert((3,), 10);
A::insert((4,), 10);
A::remove_all(None);
let _ = A::clear(u32::max_value(), None);
assert_eq!(A::contains_key((3,)), false);
assert_eq!(A::contains_key((4,)), false);
@@ -739,7 +808,7 @@ mod test {
]
);
WithLen::remove_all(None);
let _ = WithLen::clear(u32::max_value(), None);
assert_eq!(WithLen::decode_len((3,)), None);
WithLen::append((0,), 10);
assert_eq!(WithLen::decode_len((0,)), Some(1));
@@ -750,26 +819,30 @@ mod test {
fn test_2_keys() {
type A = StorageNMap<
Prefix,
(Key<Blake2_128Concat, u16>, Key<Twox64Concat, u8>),
(NMapKey<Blake2_128Concat, u16>, NMapKey<Twox64Concat, u8>),
u32,
OptionQuery,
>;
type AValueQueryWithAnOnEmpty = StorageNMap<
Prefix,
(Key<Blake2_128Concat, u16>, Key<Twox64Concat, u8>),
(NMapKey<Blake2_128Concat, u16>, NMapKey<Twox64Concat, u8>),
u32,
ValueQuery,
ADefault,
>;
type B = StorageNMap<Prefix, (Key<Blake2_256, u16>, Key<Twox128, u8>), u32, ValueQuery>;
type B =
StorageNMap<Prefix, (NMapKey<Blake2_256, u16>, NMapKey<Twox128, u8>), u32, ValueQuery>;
type C = StorageNMap<
Prefix,
(Key<Blake2_128Concat, u16>, Key<Twox64Concat, u8>),
(NMapKey<Blake2_128Concat, u16>, NMapKey<Twox64Concat, u8>),
u8,
ValueQuery,
>;
type WithLen =
StorageNMap<Prefix, (Key<Blake2_128Concat, u16>, Key<Twox64Concat, u8>), Vec<u32>>;
type WithLen = StorageNMap<
Prefix,
(NMapKey<Blake2_128Concat, u16>, NMapKey<Twox64Concat, u8>),
Vec<u32>,
>;
TestExternalities::default().execute_with(|| {
let mut k: Vec<u8> = vec![];
@@ -788,7 +861,10 @@ mod test {
assert_eq!(A::get((3, 30)), Some(10));
assert_eq!(AValueQueryWithAnOnEmpty::get((3, 30)), 10);
A::swap::<(Key<Blake2_128Concat, u16>, Key<Twox64Concat, u8>), _, _>((3, 30), (2, 20));
A::swap::<(NMapKey<Blake2_128Concat, u16>, NMapKey<Twox64Concat, u8>), _, _>(
(3, 30),
(2, 20),
);
assert_eq!(A::contains_key((3, 30)), false);
assert_eq!(A::contains_key((2, 20)), true);
assert_eq!(A::get((3, 30)), None);
@@ -877,7 +953,7 @@ mod test {
A::insert((3, 30), 10);
A::insert((4, 40), 10);
A::remove_all(None);
let _ = A::clear(u32::max_value(), None);
assert_eq!(A::contains_key((3, 30)), false);
assert_eq!(A::contains_key((4, 40)), false);
@@ -938,7 +1014,7 @@ mod test {
]
);
WithLen::remove_all(None);
let _ = WithLen::clear(u32::max_value(), None);
assert_eq!(WithLen::decode_len((3, 30)), None);
WithLen::append((0, 100), 10);
assert_eq!(WithLen::decode_len((0, 100)), Some(1));
@@ -956,32 +1032,48 @@ mod test {
fn test_3_keys() {
type A = StorageNMap<
Prefix,
(Key<Blake2_128Concat, u16>, Key<Blake2_128Concat, u16>, Key<Twox64Concat, u16>),
(
NMapKey<Blake2_128Concat, u16>,
NMapKey<Blake2_128Concat, u16>,
NMapKey<Twox64Concat, u16>,
),
u32,
OptionQuery,
>;
type AValueQueryWithAnOnEmpty = StorageNMap<
Prefix,
(Key<Blake2_128Concat, u16>, Key<Blake2_128Concat, u16>, Key<Twox64Concat, u16>),
(
NMapKey<Blake2_128Concat, u16>,
NMapKey<Blake2_128Concat, u16>,
NMapKey<Twox64Concat, u16>,
),
u32,
ValueQuery,
ADefault,
>;
type B = StorageNMap<
Prefix,
(Key<Blake2_256, u16>, Key<Blake2_256, u16>, Key<Twox128, u16>),
(NMapKey<Blake2_256, u16>, NMapKey<Blake2_256, u16>, NMapKey<Twox128, u16>),
u32,
ValueQuery,
>;
type C = StorageNMap<
Prefix,
(Key<Blake2_128Concat, u16>, Key<Blake2_128Concat, u16>, Key<Twox64Concat, u16>),
(
NMapKey<Blake2_128Concat, u16>,
NMapKey<Blake2_128Concat, u16>,
NMapKey<Twox64Concat, u16>,
),
u8,
ValueQuery,
>;
type WithLen = StorageNMap<
Prefix,
(Key<Blake2_128Concat, u16>, Key<Blake2_128Concat, u16>, Key<Twox64Concat, u16>),
(
NMapKey<Blake2_128Concat, u16>,
NMapKey<Blake2_128Concat, u16>,
NMapKey<Twox64Concat, u16>,
),
Vec<u32>,
>;
@@ -1004,7 +1096,11 @@ mod test {
assert_eq!(AValueQueryWithAnOnEmpty::get((1, 10, 100)), 30);
A::swap::<
(Key<Blake2_128Concat, u16>, Key<Blake2_128Concat, u16>, Key<Twox64Concat, u16>),
(
NMapKey<Blake2_128Concat, u16>,
NMapKey<Blake2_128Concat, u16>,
NMapKey<Twox64Concat, u16>,
),
_,
_,
>((1, 10, 100), (2, 20, 200));
@@ -1093,7 +1189,7 @@ mod test {
A::insert((3, 30, 300), 10);
A::insert((4, 40, 400), 10);
A::remove_all(None);
let _ = A::clear(u32::max_value(), None);
assert_eq!(A::contains_key((3, 30, 300)), false);
assert_eq!(A::contains_key((4, 40, 400)), false);
@@ -1161,7 +1257,7 @@ mod test {
]
);
WithLen::remove_all(None);
let _ = WithLen::clear(u32::max_value(), None);
assert_eq!(WithLen::decode_len((3, 30, 300)), None);
WithLen::append((0, 100, 1000), 10);
assert_eq!(WithLen::decode_len((0, 100, 1000)), Some(1));
@@ -96,10 +96,63 @@ pub fn kill(key: &[u8]) {
}
/// Ensure keys with the given `prefix` have no entries in storage.
///
/// Deprecated thin wrapper kept for callers that have not yet migrated to [`clear_prefix`].
/// It invokes the legacy `sp_io::storage::clear_prefix` host function directly.
///
/// Returns a [`sp_io::KillStorageResult`]: `AllRemoved(n)` when everything under `prefix`
/// was deleted, or `SomeRemaining(n)` when the `limit` was hit first, with `n` the number of
/// keys removed by the backend.
#[deprecated = "Use `clear_prefix` instead"]
pub fn kill_prefix(prefix: &[u8], limit: Option<u32>) -> sp_io::KillStorageResult {
	// TODO: Once the network has upgraded to include the new host functions, this code can be
	// enabled.
	// clear_prefix(prefix, limit).into()
	sp_io::storage::clear_prefix(prefix, limit)
}
/// Partially clear the storage of all keys under a common `prefix`.
///
/// # Limit
///
/// A *limit* should always be provided through `maybe_limit`. This is one fewer than the
/// maximum number of backend iterations which may be done by this operation and as such
/// represents the maximum number of backend deletions which may happen. A *limit* of zero
/// implies that no keys will be deleted, though there may be a single iteration done.
///
/// The limit can be used to partially delete storage items in case it is too large or costly
/// to delete all in a single operation.
///
/// # Cursor
///
/// A *cursor* may be passed in to this operation with `maybe_cursor`. `None` should only be
/// passed once (in the initial call) for any attempt to clear storage. In general, subsequent calls
/// operating on the same prefix should pass `Some` and this value should be equal to the
/// previous call result's `maybe_cursor` field. The only exception to this is when you can
/// guarantee that the subsequent call is in a new block; in this case the previous call's result
/// cursor need not be passed in and a `None` may be passed instead. This exception may be useful
/// when making this call solely from a block-hook such as `on_initialize`.
///
/// Returns [`MultiRemovalResults`](sp_io::MultiRemovalResults) to inform about the result. Once the
/// resultant `maybe_cursor` field is `None`, then no further items remain to be deleted.
///
/// NOTE: After the initial call for any given prefix, it is important that no further keys are
/// inserted under it. If so, then they may or may not be deleted by subsequent calls.
///
/// # Note
///
/// Please note that keys which are residing in the overlay are deleted without
/// counting towards the `limit`.
pub fn clear_prefix(
	prefix: &[u8],
	maybe_limit: Option<u32>,
	// Currently unused: the legacy host function below has no cursor support; see TODO.
	_maybe_cursor: Option<&[u8]>,
) -> sp_io::MultiRemovalResults {
	// TODO: Once the network has upgraded to include the new host functions, this code can be
	// enabled.
	// sp_io::storage::clear_prefix(prefix, maybe_limit, maybe_cursor)
	use sp_io::{KillStorageResult::*, MultiRemovalResults};
	#[allow(deprecated)]
	let (maybe_cursor, i) = match kill_prefix(prefix, maybe_limit) {
		// Everything under `prefix` was removed: no cursor is needed for a follow-up call.
		AllRemoved(i) => (None, i),
		// Items remain: hand back the prefix itself as an opaque resume cursor.
		SomeRemaining(i) => (Some(prefix.to_vec()), i),
	};
	// The legacy host function reports only a single deletion count, so reuse it for every
	// counter field of the richer result type.
	MultiRemovalResults { maybe_cursor, backend: i, unique: i, loops: i }
}
/// Get a Vec of bytes from storage.
pub fn get_raw(key: &[u8]) -> Option<Vec<u8>> {
sp_io::storage::get(key)