Rework storage iterators (#13284)

* Rework storage iterators

* Make sure storage iteration is also accounted for when benchmarking

* Use `trie-db` from crates.io

* Appease clippy

* Bump `trie-bench` to 0.35.0

* Fix tests' compilation

* Update comment to clarify how `IterArgs::start_at` works

* Add extra tests

* Fix iterators on `Client` so that they behave as before

* Add extra `unwrap`s in tests

* More clippy fixes

* Come on clippy, give me a break already

* Rename `allow_missing` to `stop_on_incomplete_database`

* Add `#[inline]` to `with_recorder_and_cache`

* Use `with_recorder_and_cache` in `with_trie_db`; add doc comment

* Simplify code: use `with_trie_db` in `next_storage_key_from_root`

* Remove `expect`s in the benchmarking CLI

* Add extra doc comments

* Move `RawIter` before `TrieBackendEssence` (no code changes; just cut-paste)

* Remove a TODO in tests

* Update comment for `StorageIterator::was_complete`

* Update `trie-db` to 0.25.1
This commit is contained in:
Koute
2023-02-22 16:49:25 +09:00
committed by GitHub
parent 236bbbd5ef
commit f8e3bdad3d
27 changed files with 1097 additions and 742 deletions
+213 -23
View File
@@ -24,12 +24,135 @@ use crate::{
StorageKey, StorageValue, UsageInfo,
};
use codec::Encode;
use core::marker::PhantomData;
use hash_db::Hasher;
use sp_core::storage::{ChildInfo, StateVersion, TrackedStorageKey};
#[cfg(feature = "std")]
use sp_core::traits::RuntimeCode;
use sp_std::vec::Vec;
/// A struct containing arguments for iterating over the storage.
#[derive(Default)]
#[non_exhaustive]
pub struct IterArgs<'a> {
	/// The prefix of the keys over which to iterate.
	pub prefix: Option<&'a [u8]>,

	/// The key from which to start the iteration.
	///
	/// This is inclusive: the iteration will include the key which is specified here,
	/// assuming it exists.
	pub start_at: Option<&'a [u8]>,

	/// The info of the child trie over which to iterate.
	///
	/// If `None` the main (top-level) trie is iterated.
	pub child_info: Option<ChildInfo>,

	/// Whether to stop iteration when a missing trie node is reached.
	///
	/// When a missing trie node is reached the iterator will:
	/// - return an error if this is set to `false` (default)
	/// - return `None` if this is set to `true`
	pub stop_on_incomplete_database: bool,
}
/// A trait for a raw storage iterator.
///
/// Unlike a standard [`Iterator`] this is "lifetimeless": the backend is passed
/// into every call instead of being borrowed by the iterator itself, so the
/// iterator can be stored independently of the backend it iterates over.
pub trait StorageIterator<H>
where
	H: Hasher,
{
	/// The state backend over which the iterator is iterating.
	type Backend;

	/// The error type.
	type Error;

	/// Fetches the next key from the storage.
	fn next_key(
		&mut self,
		backend: &Self::Backend,
	) -> Option<core::result::Result<StorageKey, Self::Error>>;

	/// Fetches the next key and value from the storage.
	fn next_pair(
		&mut self,
		backend: &Self::Backend,
	) -> Option<core::result::Result<(StorageKey, StorageValue), Self::Error>>;

	/// Returns whether the end of iteration was reached without an error.
	fn was_complete(&self) -> bool;
}
/// An iterator over storage keys and values.
pub struct PairsIter<'a, H, I>
where
	H: Hasher,
	I: StorageIterator<H>,
{
	// The backend items are pulled from; `None` makes the iterator yield nothing.
	backend: Option<&'a I::Backend>,
	// The underlying lifetimeless iterator doing the actual work.
	raw_iter: I,
	_phantom: PhantomData<H>,
}
impl<'a, H, I> Iterator for PairsIter<'a, H, I>
where
	H: Hasher,
	I: StorageIterator<H>,
{
	type Item = Result<(Vec<u8>, Vec<u8>), <I as StorageIterator<H>>::Error>;

	/// Delegates to the raw iterator; without a backend the iteration is empty.
	fn next(&mut self) -> Option<Self::Item> {
		let backend = self.backend?;
		self.raw_iter.next_pair(backend)
	}
}
impl<'a, H, I> Default for PairsIter<'a, H, I>
where
H: Hasher,
I: StorageIterator<H> + Default,
{
fn default() -> Self {
Self {
backend: Default::default(),
raw_iter: Default::default(),
_phantom: Default::default(),
}
}
}
/// An iterator over storage keys.
pub struct KeysIter<'a, H, I>
where
	H: Hasher,
	I: StorageIterator<H>,
{
	// The backend keys are pulled from; `None` makes the iterator yield nothing.
	backend: Option<&'a I::Backend>,
	// The underlying lifetimeless iterator doing the actual work.
	raw_iter: I,
	_phantom: PhantomData<H>,
}
impl<'a, H, I> Iterator for KeysIter<'a, H, I>
where
	H: Hasher,
	I: StorageIterator<H>,
{
	type Item = Result<Vec<u8>, <I as StorageIterator<H>>::Error>;

	/// Delegates to the raw iterator; without a backend the iteration is empty.
	fn next(&mut self) -> Option<Self::Item> {
		let backend = self.backend?;
		self.raw_iter.next_key(backend)
	}
}
impl<'a, H, I> Default for KeysIter<'a, H, I>
where
H: Hasher,
I: StorageIterator<H> + Default,
{
fn default() -> Self {
Self {
backend: Default::default(),
raw_iter: Default::default(),
_phantom: Default::default(),
}
}
}
/// A state backend is used to read state data and can have changes committed
/// to it.
///
@@ -44,6 +167,9 @@ pub trait Backend<H: Hasher>: sp_std::fmt::Debug {
/// Type of trie backend storage.
type TrieBackendStorage: TrieBackendStorage<H, Overlay = Self::Transaction>;
/// Type of the raw storage iterator.
type RawIter: StorageIterator<H, Backend = Self, Error = Self::Error>;
/// Get keyed storage or None if there is nothing associated.
fn storage(&self, key: &[u8]) -> Result<Option<StorageValue>, Self::Error>;
@@ -95,43 +221,103 @@ pub trait Backend<H: Hasher>: sp_std::fmt::Debug {
/// Otherwise an error is produced.
///
/// Returns `true` if trie end is reached.
// TODO: Remove this.
fn apply_to_key_values_while<F: FnMut(Vec<u8>, Vec<u8>) -> bool>(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
mut f: F,
allow_missing: bool,
) -> Result<bool, Self::Error>;
) -> Result<bool, Self::Error> {
let args = IterArgs {
child_info: child_info.cloned(),
prefix,
start_at,
stop_on_incomplete_database: allow_missing,
..IterArgs::default()
};
let mut iter = self.pairs(args)?;
while let Some(key_value) = iter.next() {
let (key, value) = key_value?;
if !f(key, value) {
return Ok(false)
}
}
Ok(iter.raw_iter.was_complete())
}
/// Retrieve all entries keys of storage and call `f` for each of those keys.
/// Aborts as soon as `f` returns false.
// TODO: Remove this.
fn apply_to_keys_while<F: FnMut(&[u8]) -> bool>(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
);
mut f: F,
) -> Result<(), Self::Error> {
let args =
IterArgs { child_info: child_info.cloned(), prefix, start_at, ..IterArgs::default() };
for key in self.keys(args)? {
if !f(&key?) {
return Ok(())
}
}
Ok(())
}
/// Retrieve all entries keys which start with the given prefix and
/// call `f` for each of those keys.
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], mut f: F) {
self.for_key_values_with_prefix(prefix, |k, _v| f(k))
// TODO: Remove this.
fn for_keys_with_prefix<F: FnMut(&[u8])>(
&self,
prefix: &[u8],
mut f: F,
) -> Result<(), Self::Error> {
let args = IterArgs { prefix: Some(prefix), ..IterArgs::default() };
self.keys(args)?.try_for_each(|key| {
f(&key?);
Ok(())
})
}
/// Retrieve all entries keys and values of which start with the given prefix and
/// call `f` for each of those keys.
fn for_key_values_with_prefix<F: FnMut(&[u8], &[u8])>(&self, prefix: &[u8], f: F);
// TODO: Remove this.
fn for_key_values_with_prefix<F: FnMut(&[u8], &[u8])>(
&self,
prefix: &[u8],
mut f: F,
) -> Result<(), Self::Error> {
let args = IterArgs { prefix: Some(prefix), ..IterArgs::default() };
self.pairs(args)?.try_for_each(|key_value| {
let (key, value) = key_value?;
f(&key, &value);
Ok(())
})
}
/// Retrieve all child entries keys which start with the given prefix and
/// call `f` for each of those keys.
// TODO: Remove this.
fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
&self,
child_info: &ChildInfo,
prefix: &[u8],
f: F,
);
mut f: F,
) -> Result<(), Self::Error> {
let args = IterArgs {
child_info: Some(child_info.clone()),
prefix: Some(prefix),
..IterArgs::default()
};
self.keys(args)?.try_for_each(|key| {
f(&key?);
Ok(())
})
}
/// Calculate the storage root, with given delta over what is already stored in
/// the backend, and produce a "transaction" that can be used to commit.
@@ -156,21 +342,25 @@ pub trait Backend<H: Hasher>: sp_std::fmt::Debug {
where
H::Out: Ord;
/// Get all key/value pairs into a Vec.
fn pairs(&self) -> Vec<(StorageKey, StorageValue)>;
/// Returns a lifetimeless raw storage iterator.
fn raw_iter(&self, args: IterArgs) -> Result<Self::RawIter, Self::Error>;
/// Get all keys with given prefix
fn keys(&self, prefix: &[u8]) -> Vec<StorageKey> {
let mut all = Vec::new();
self.for_keys_with_prefix(prefix, |k| all.push(k.to_vec()));
all
/// Get an iterator over key/value pairs.
fn pairs<'a>(&'a self, args: IterArgs) -> Result<PairsIter<'a, H, Self::RawIter>, Self::Error> {
Ok(PairsIter {
backend: Some(self),
raw_iter: self.raw_iter(args)?,
_phantom: Default::default(),
})
}
/// Get all keys of child storage with given prefix
fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec<StorageKey> {
let mut all = Vec::new();
self.for_child_keys_with_prefix(child_info, prefix, |k| all.push(k.to_vec()));
all
/// Get an iterator over keys.
fn keys<'a>(&'a self, args: IterArgs) -> Result<KeysIter<'a, H, Self::RawIter>, Self::Error> {
Ok(KeysIter {
backend: Some(self),
raw_iter: self.raw_iter(args)?,
_phantom: Default::default(),
})
}
/// Calculate the storage root, with given delta over what is already stored
@@ -309,7 +499,7 @@ where
#[cfg(feature = "std")]
pub struct BackendRuntimeCode<'a, B, H> {
backend: &'a B,
_marker: std::marker::PhantomData<H>,
_marker: PhantomData<H>,
}
#[cfg(feature = "std")]
@@ -332,7 +522,7 @@ where
{
/// Create a new instance.
pub fn new(backend: &'a B) -> Self {
Self { backend, _marker: std::marker::PhantomData }
Self { backend, _marker: PhantomData }
}
/// Return the [`RuntimeCode`] build from the wrapped `backend`.
+31 -24
View File
@@ -159,9 +159,10 @@ where
use std::collections::HashMap;
self.backend
.pairs()
.iter()
.map(|&(ref k, ref v)| (k.to_vec(), Some(v.to_vec())))
.pairs(Default::default())
.expect("never fails in tests; qed.")
.map(|key_value| key_value.expect("never fails in tests; qed."))
.map(|(k, v)| (k, Some(v)))
.chain(self.overlay.changes().map(|(k, v)| (k.clone(), v.value().cloned())))
.collect::<HashMap<_, _>>()
.into_iter()
@@ -757,28 +758,34 @@ where
let mut delete_count: u32 = 0;
let mut loop_count: u32 = 0;
let mut maybe_next_key = None;
self.backend
.apply_to_keys_while(maybe_child, maybe_prefix, maybe_cursor, |key| {
if maybe_limit.map_or(false, |limit| loop_count == limit) {
maybe_next_key = Some(key.to_vec());
return false
}
let overlay = match maybe_child {
Some(child_info) => self.overlay.child_storage(child_info, key),
None => self.overlay.storage(key),
};
if !matches!(overlay, Some(None)) {
// not pending deletion from the backend - delete it.
if let Some(child_info) = maybe_child {
self.overlay.set_child_storage(child_info, key.to_vec(), None);
} else {
self.overlay.set_storage(key.to_vec(), None);
let result =
self.backend
.apply_to_keys_while(maybe_child, maybe_prefix, maybe_cursor, |key| {
if maybe_limit.map_or(false, |limit| loop_count == limit) {
maybe_next_key = Some(key.to_vec());
return false
}
delete_count = delete_count.saturating_add(1);
}
loop_count = loop_count.saturating_add(1);
true
});
let overlay = match maybe_child {
Some(child_info) => self.overlay.child_storage(child_info, key),
None => self.overlay.storage(key),
};
if !matches!(overlay, Some(None)) {
// not pending deletion from the backend - delete it.
if let Some(child_info) = maybe_child {
self.overlay.set_child_storage(child_info, key.to_vec(), None);
} else {
self.overlay.set_storage(key.to_vec(), None);
}
delete_count = delete_count.saturating_add(1);
}
loop_count = loop_count.saturating_add(1);
true
});
if let Err(error) = result {
log::debug!(target: "trie", "Error while iterating the storage: {}", error);
}
(maybe_next_key, delete_count, loop_count)
}
}
@@ -123,7 +123,7 @@ impl sp_std::fmt::Display for DefaultError {
}
pub use crate::{
backend::Backend,
backend::{Backend, IterArgs, KeysIter, PairsIter, StorageIterator},
error::{Error, ExecutionError},
ext::Ext,
overlayed_changes::{
@@ -241,7 +241,12 @@ where
H::Out: Ord + codec::Codec,
{
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "overlay: {:?}\nbackend: {:?}", self.overlay, self.backend.pairs())
let pairs: Vec<_> = self
.backend
.pairs(Default::default())
.expect("creating an iterator over all of the pairs doesn't fail in tests")
.collect();
write!(f, "overlay: {:?}\nbackend: {:?}", self.overlay, pairs)
}
}
@@ -20,6 +20,7 @@
#[cfg(feature = "std")]
use crate::backend::AsTrieBackend;
use crate::{
backend::IterArgs,
trie_backend_essence::{TrieBackendEssence, TrieBackendStorage},
Backend, StorageKey, StorageValue,
};
@@ -28,7 +29,6 @@ use codec::Codec;
use hash_db::HashDB;
use hash_db::Hasher;
use sp_core::storage::{ChildInfo, StateVersion};
use sp_std::vec::Vec;
#[cfg(feature = "std")]
use sp_trie::{cache::LocalTrieCache, recorder::Recorder};
#[cfg(feature = "std")]
@@ -51,6 +51,7 @@ pub trait AsLocalTrieCache<H: Hasher>: sealed::Sealed {
impl<H: Hasher> AsLocalTrieCache<H> for LocalTrieCache<H> {
#[cfg(feature = "std")]
#[inline]
fn as_local_trie_cache(&self) -> &LocalTrieCache<H> {
self
}
@@ -58,6 +59,7 @@ impl<H: Hasher> AsLocalTrieCache<H> for LocalTrieCache<H> {
#[cfg(feature = "std")]
impl<H: Hasher> AsLocalTrieCache<H> for &LocalTrieCache<H> {
#[inline]
fn as_local_trie_cache(&self) -> &LocalTrieCache<H> {
self
}
@@ -236,6 +238,7 @@ where
type Error = crate::DefaultError;
type Transaction = S::Overlay;
type TrieBackendStorage = S;
type RawIter = crate::trie_backend_essence::RawIter<S, H, C>;
fn storage_hash(&self, key: &[u8]) -> Result<Option<H::Out>, Self::Error> {
self.essence.storage_hash(key)
@@ -273,51 +276,8 @@ where
self.essence.next_child_storage_key(child_info, key)
}
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
self.essence.for_keys_with_prefix(prefix, f)
}
fn for_key_values_with_prefix<F: FnMut(&[u8], &[u8])>(&self, prefix: &[u8], f: F) {
self.essence.for_key_values_with_prefix(prefix, f)
}
fn apply_to_key_values_while<F: FnMut(Vec<u8>, Vec<u8>) -> bool>(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
allow_missing: bool,
) -> Result<bool, Self::Error> {
self.essence
.apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing)
}
fn apply_to_keys_while<F: FnMut(&[u8]) -> bool>(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
) {
self.essence.apply_to_keys_while(child_info, prefix, start_at, f)
}
fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
&self,
child_info: &ChildInfo,
prefix: &[u8],
f: F,
) {
self.essence.for_child_keys_with_prefix(child_info, prefix, f)
}
fn pairs(&self) -> Vec<(StorageKey, StorageValue)> {
self.essence.pairs()
}
fn keys(&self, prefix: &[u8]) -> Vec<StorageKey> {
self.essence.keys(prefix)
fn raw_iter(&self, args: IterArgs) -> Result<Self::RawIter, Self::Error> {
self.essence.raw_iter(args)
}
fn storage_root<'a>(
@@ -579,7 +539,11 @@ pub mod tests {
cache: Option<Cache>,
recorder: Option<Recorder>,
) {
assert!(!test_trie(state_version, cache, recorder).pairs().is_empty());
assert!(!test_trie(state_version, cache, recorder)
.pairs(Default::default())
.unwrap()
.next()
.is_none());
}
#[test]
@@ -589,8 +553,163 @@ pub mod tests {
Default::default(),
)
.build()
.pairs()
.is_empty());
.pairs(Default::default())
.unwrap()
.next()
.is_none());
}
parameterized_test!(storage_iteration_works, storage_iteration_works_inner);
fn storage_iteration_works_inner(
state_version: StateVersion,
cache: Option<Cache>,
recorder: Option<Recorder>,
) {
let trie = test_trie(state_version, cache, recorder);
// Fetch everything.
assert_eq!(
trie.keys(Default::default())
.unwrap()
.map(|result| result.unwrap())
.take(5)
.collect::<Vec<_>>(),
vec![
b":child_storage:default:sub1".to_vec(),
b":code".to_vec(),
b"key".to_vec(),
b"value1".to_vec(),
b"value2".to_vec(),
]
);
// Fetch starting at a given key (full key).
assert_eq!(
trie.keys(IterArgs { start_at: Some(b"key"), ..IterArgs::default() })
.unwrap()
.map(|result| result.unwrap())
.take(3)
.collect::<Vec<_>>(),
vec![b"key".to_vec(), b"value1".to_vec(), b"value2".to_vec(),]
);
// Fetch starting at a given key (partial key).
assert_eq!(
trie.keys(IterArgs { start_at: Some(b"ke"), ..IterArgs::default() })
.unwrap()
.map(|result| result.unwrap())
.take(3)
.collect::<Vec<_>>(),
vec![b"key".to_vec(), b"value1".to_vec(), b"value2".to_vec(),]
);
// Fetch starting at a given key (empty key).
assert_eq!(
trie.keys(IterArgs { start_at: Some(b""), ..IterArgs::default() })
.unwrap()
.map(|result| result.unwrap())
.take(5)
.collect::<Vec<_>>(),
vec![
b":child_storage:default:sub1".to_vec(),
b":code".to_vec(),
b"key".to_vec(),
b"value1".to_vec(),
b"value2".to_vec(),
]
);
// Fetch starting at a given key and with prefix which doesn't match that key.
assert!(trie
.keys(IterArgs {
prefix: Some(b"value"),
start_at: Some(b"key"),
..IterArgs::default()
})
.unwrap()
.map(|result| result.unwrap())
.next()
.is_none());
// Fetch starting at a given key and with prefix which does match that key.
assert_eq!(
trie.keys(IterArgs {
prefix: Some(b"value"),
start_at: Some(b"value"),
..IterArgs::default()
})
.unwrap()
.map(|result| result.unwrap())
.collect::<Vec<_>>(),
vec![b"value1".to_vec(), b"value2".to_vec(),]
);
// Also test out the wrapper methods.
// TODO: Remove this once these methods are gone.
let mut list = Vec::new();
assert!(trie
.apply_to_key_values_while(
None,
None,
Some(b"key"),
|key, _| {
list.push(key);
true
},
false
)
.unwrap());
assert_eq!(list[0..3], vec![b"key".to_vec(), b"value1".to_vec(), b"value2".to_vec(),]);
let mut list = Vec::new();
trie.apply_to_keys_while(None, None, Some(b"key"), |key| {
list.push(key.to_vec());
true
})
.unwrap();
assert_eq!(list[0..3], vec![b"key".to_vec(), b"value1".to_vec(), b"value2".to_vec(),]);
let mut list = Vec::new();
trie.apply_to_keys_while(None, None, Some(b"k"), |key| {
list.push(key.to_vec());
true
})
.unwrap();
assert_eq!(list[0..3], vec![b"key".to_vec(), b"value1".to_vec(), b"value2".to_vec(),]);
let mut list = Vec::new();
trie.apply_to_keys_while(None, None, Some(b""), |key| {
list.push(key.to_vec());
true
})
.unwrap();
assert_eq!(
list[0..5],
vec![
b":child_storage:default:sub1".to_vec(),
b":code".to_vec(),
b"key".to_vec(),
b"value1".to_vec(),
b"value2".to_vec(),
]
);
let mut list = Vec::new();
trie.apply_to_keys_while(None, Some(b"value"), Some(b"key"), |key| {
list.push(key.to_vec());
true
})
.unwrap();
assert!(list.is_empty());
let mut list = Vec::new();
trie.apply_to_keys_while(None, Some(b"value"), Some(b"value"), |key| {
list.push(key.to_vec());
true
})
.unwrap();
assert_eq!(list, vec![b"value1".to_vec(), b"value2".to_vec(),]);
}
parameterized_test!(storage_root_is_non_default, storage_root_is_non_default_inner);
@@ -638,7 +757,8 @@ pub mod tests {
trie.for_keys_with_prefix(b"value", |key| {
let for_first_time = seen.insert(key.to_vec());
assert!(for_first_time, "Seen key '{:?}' more than once", key);
});
})
.unwrap();
let mut expected = HashSet::new();
expected.insert(b"value1".to_vec());
@@ -664,7 +784,8 @@ pub mod tests {
.collect::<Vec<_>>();
let trie = test_trie(state_version, cache, recorder);
let keys = trie.keys(&[]);
let keys: Vec<_> =
trie.keys(Default::default()).unwrap().map(|result| result.unwrap()).collect();
assert_eq!(expected, keys);
}
@@ -724,7 +845,18 @@ pub mod tests {
.with_recorder(Recorder::default())
.build();
assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap());
assert_eq!(trie_backend.pairs(), proving_backend.pairs());
assert_eq!(
trie_backend
.pairs(Default::default())
.unwrap()
.map(|result| result.unwrap())
.collect::<Vec<_>>(),
proving_backend
.pairs(Default::default())
.unwrap()
.map(|result| result.unwrap())
.collect::<Vec<_>>()
);
let (trie_root, mut trie_mdb) =
trie_backend.storage_root(std::iter::empty(), state_version);
@@ -19,24 +19,23 @@
//! from storage.
use crate::{
backend::Consolidate, debug, trie_backend::AsLocalTrieCache, warn, StorageKey, StorageValue,
backend::{Consolidate, IterArgs, StorageIterator},
trie_backend::AsLocalTrieCache,
warn, StorageKey, StorageValue,
};
use codec::Codec;
use hash_db::{self, AsHashDB, HashDB, HashDBRef, Hasher, Prefix};
#[cfg(feature = "std")]
use parking_lot::RwLock;
use sp_core::storage::{ChildInfo, ChildType, StateVersion};
#[cfg(not(feature = "std"))]
use sp_std::marker::PhantomData;
use sp_std::{boxed::Box, vec::Vec};
use sp_std::{boxed::Box, marker::PhantomData, vec::Vec};
#[cfg(feature = "std")]
use sp_trie::recorder::Recorder;
use sp_trie::{
child_delta_trie_root, delta_trie_root, empty_child_trie_root, read_child_trie_hash,
read_child_trie_value, read_trie_value,
trie_types::{TrieDBBuilder, TrieError},
DBValue, KeySpacedDB, NodeCodec, Trie, TrieCache, TrieDBIterator, TrieDBKeyIterator,
TrieRecorder,
DBValue, KeySpacedDB, NodeCodec, Trie, TrieCache, TrieDBRawIterator, TrieRecorder,
};
#[cfg(feature = "std")]
use std::{collections::HashMap, sync::Arc};
@@ -76,6 +75,109 @@ impl<H> Cache<H> {
}
}
/// Tracks whether a [`RawIter`] is still live or has already finished.
enum IterState {
	// The iterator can still yield further items.
	Pending,
	// Iteration finished after successfully reaching the end of the trie.
	FinishedComplete,
	// Iteration finished early because an error was encountered.
	FinishedIncomplete,
}
/// A raw iterator over the storage.
pub struct RawIter<S, H, C>
where
	H: Hasher,
{
	// If `true`, hitting a missing trie node ends iteration with `None`
	// instead of yielding an error.
	stop_on_incomplete_database: bool,
	// Root of the (child) trie being iterated.
	root: H::Out,
	// Set when iterating over a child trie; `None` for the top-level trie.
	child_info: Option<ChildInfo>,
	// The underlying `trie-db` iterator holding the current position.
	trie_iter: TrieDBRawIterator<Layout<H>>,
	// Whether iteration is still pending or has finished (and how).
	state: IterState,
	_phantom: PhantomData<(S, C)>,
}
impl<S, H, C> RawIter<S, H, C>
where
	H: Hasher,
	S: TrieBackendStorage<H>,
	H::Out: Codec + Ord,
	C: AsLocalTrieCache<H> + Send + Sync,
{
	/// Drives the underlying `trie-db` iterator one step through `callback`
	/// and translates the result, updating this iterator's state machine.
	///
	/// Returns `None` once the iteration has finished, either because the end
	/// of the trie was reached or because an error was encountered.
	#[inline]
	fn prepare<R>(
		&mut self,
		backend: &TrieBackendEssence<S, H, C>,
		callback: impl FnOnce(
			&sp_trie::TrieDB<Layout<H>>,
			&mut TrieDBRawIterator<Layout<H>>,
		) -> Option<core::result::Result<R, Box<TrieError<<H as Hasher>::Out>>>>,
	) -> Option<Result<R>> {
		// Once finished (for whatever reason) the iterator stays finished.
		if !matches!(self.state, IterState::Pending) {
			return None
		}

		let result = backend.with_trie_db(self.root, self.child_info.as_ref(), |db| {
			callback(&db, &mut self.trie_iter)
		});
		match result {
			Some(Ok(key_value)) => Some(Ok(key_value)),
			None => {
				// The trie was fully iterated without hitting an error.
				self.state = IterState::FinishedComplete;
				None
			},
			Some(Err(error)) => {
				self.state = IterState::FinishedIncomplete;
				if matches!(*error, TrieError::IncompleteDatabase(_)) &&
					self.stop_on_incomplete_database
				{
					// The caller opted into silently stopping on missing nodes.
					None
				} else {
					Some(Err(format!("TrieDB iteration error: {}", error)))
				}
			},
		}
	}
}
impl<S, H, C> Default for RawIter<S, H, C>
where
H: Hasher,
{
fn default() -> Self {
Self {
stop_on_incomplete_database: false,
child_info: None,
root: Default::default(),
trie_iter: TrieDBRawIterator::empty(),
state: IterState::FinishedComplete,
_phantom: Default::default(),
}
}
}
impl<S, H, C> StorageIterator<H> for RawIter<S, H, C>
where
	H: Hasher,
	S: TrieBackendStorage<H>,
	H::Out: Codec + Ord,
	C: AsLocalTrieCache<H> + Send + Sync,
{
	type Backend = crate::TrieBackend<S, H, C>;
	type Error = crate::DefaultError;

	// Fetches the next key by stepping the underlying `trie-db` iterator.
	#[inline]
	fn next_key(&mut self, backend: &Self::Backend) -> Option<Result<StorageKey>> {
		self.prepare(&backend.essence, |trie, trie_iter| trie_iter.next_key(&trie))
	}

	// Fetches the next key/value pair by stepping the underlying `trie-db` iterator.
	#[inline]
	fn next_pair(&mut self, backend: &Self::Backend) -> Option<Result<(StorageKey, StorageValue)>> {
		self.prepare(&backend.essence, |trie, trie_iter| trie_iter.next_item(&trie))
	}

	// `true` only when the end of the trie was reached without an error.
	fn was_complete(&self) -> bool {
		matches!(self.state, IterState::FinishedComplete)
	}
}
/// Patricia trie-based pairs storage essence.
pub struct TrieBackendEssence<S: TrieBackendStorage<H>, H: Hasher, C> {
storage: S,
@@ -168,6 +270,7 @@ impl<S: TrieBackendStorage<H>, H: Hasher, C: AsLocalTrieCache<H>> TrieBackendEss
///
/// If the given `storage_root` is `None`, `self.root` will be used.
#[cfg(feature = "std")]
#[inline]
fn with_recorder_and_cache<R>(
&self,
storage_root: Option<H::Out>,
@@ -193,6 +296,7 @@ impl<S: TrieBackendStorage<H>, H: Hasher, C: AsLocalTrieCache<H>> TrieBackendEss
}
#[cfg(not(feature = "std"))]
#[inline]
fn with_recorder_and_cache<R>(
&self,
_: Option<H::Out>,
@@ -262,6 +366,31 @@ impl<S: TrieBackendStorage<H>, H: Hasher, C: AsLocalTrieCache<H> + Send + Sync>
where
H::Out: Codec + Ord,
{
/// Builds a [`TrieDB`] for the given storage `root` (and, if supplied, the
/// given child trie) and invokes `callback` with it, wiring up the optional
/// recorder and cache in the process.
#[inline]
fn with_trie_db<R>(
	&self,
	root: H::Out,
	child_info: Option<&ChildInfo>,
	callback: impl FnOnce(&sp_trie::TrieDB<Layout<H>>) -> R,
) -> R {
	let base_db = self as &dyn HashDBRef<H, Vec<u8>>;
	// For child tries every key is prefixed with the child's keyspace.
	let keyspaced_db;
	let db: &dyn HashDBRef<H, Vec<u8>> = match child_info {
		Some(info) => {
			keyspaced_db = KeySpacedDB::new(base_db, info.keyspace());
			&keyspaced_db
		},
		None => base_db,
	};

	self.with_recorder_and_cache(Some(root), |recorder, cache| {
		let trie = TrieDBBuilder::<H>::new(db, &root)
			.with_optional_recorder(recorder)
			.with_optional_cache(cache)
			.build();
		callback(&trie)
	})
}
/// Return the next key in the trie i.e. the minimum key that is strictly superior to `key` in
/// lexicographic order.
pub fn next_storage_key(&self, key: &[u8]) -> Result<Option<StorageKey>> {
@@ -316,21 +445,7 @@ where
child_info: Option<&ChildInfo>,
key: &[u8],
) -> Result<Option<StorageKey>> {
let dyn_eph: &dyn HashDBRef<_, _>;
let keyspace_eph;
if let Some(child_info) = child_info.as_ref() {
keyspace_eph = KeySpacedDB::new(self, child_info.keyspace());
dyn_eph = &keyspace_eph;
} else {
dyn_eph = self;
}
self.with_recorder_and_cache(Some(*root), |recorder, cache| {
let trie = TrieDBBuilder::<H>::new(dyn_eph, root)
.with_optional_recorder(recorder)
.with_optional_cache(cache)
.build();
self.with_trie_db(*root, child_info, |trie| {
let mut iter = trie.key_iter().map_err(|e| format!("TrieDB iteration error: {}", e))?;
// The key just after the one given in input, basically `key++0`.
@@ -429,246 +544,42 @@ where
})
}
/// Retrieve all entries keys of storage and call `f` for each of those keys.
/// Aborts as soon as `f` returns false.
///
/// Returns `true` when all keys were iterated.
pub fn apply_to_key_values_while(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: impl FnMut(Vec<u8>, Vec<u8>) -> bool,
allow_missing_nodes: bool,
) -> Result<bool> {
let root = if let Some(child_info) = child_info.as_ref() {
match self.child_root(child_info)? {
Some(child_root) => child_root,
None => return Ok(true),
}
/// Create a raw iterator over the storage.
pub fn raw_iter(&self, args: IterArgs) -> Result<RawIter<S, H, C>> {
let root = if let Some(child_info) = args.child_info.as_ref() {
let root = match self.child_root(&child_info)? {
Some(root) => root,
None => return Ok(Default::default()),
};
root
} else {
self.root
};
self.trie_iter_inner(&root, prefix, f, child_info, start_at, allow_missing_nodes)
}
/// Retrieve all entries keys of a storage and call `f` for each of those keys.
/// Aborts as soon as `f` returns false.
pub fn apply_to_keys_while<F: FnMut(&[u8]) -> bool>(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
) {
let root = if let Some(child_info) = child_info.as_ref() {
match self.child_root(child_info) {
Ok(Some(v)) => v,
// If the child trie doesn't exist, there is no need to continue.
Ok(None) => return,
Err(e) => {
debug!(target: "trie", "Error while iterating child storage: {}", e);
return
},
}
} else {
self.root
};
self.trie_iter_key_inner(&root, prefix, f, child_info, start_at)
}
/// Execute given closure for all keys starting with prefix.
pub fn for_child_keys_with_prefix(
&self,
child_info: &ChildInfo,
prefix: &[u8],
mut f: impl FnMut(&[u8]),
) {
let root = match self.child_root(child_info) {
Ok(Some(v)) => v,
// If the child trie doesn't exist, there is no need to continue.
Ok(None) => return,
Err(e) => {
debug!(target: "trie", "Error while iterating child storage: {}", e);
return
},
};
self.trie_iter_key_inner(
&root,
Some(prefix),
|k| {
f(k);
true
},
Some(child_info),
None,
)
}
/// Execute given closure for all keys starting with prefix.
pub fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], mut f: F) {
self.trie_iter_key_inner(
&self.root,
Some(prefix),
|k| {
f(k);
true
},
None,
None,
)
}
fn trie_iter_key_inner<F: FnMut(&[u8]) -> bool>(
&self,
root: &H::Out,
maybe_prefix: Option<&[u8]>,
mut f: F,
child_info: Option<&ChildInfo>,
maybe_start_at: Option<&[u8]>,
) {
let mut iter = move |db| -> sp_std::result::Result<(), Box<TrieError<H::Out>>> {
self.with_recorder_and_cache(Some(*root), |recorder, cache| {
let trie = TrieDBBuilder::<H>::new(db, root)
.with_optional_recorder(recorder)
.with_optional_cache(cache)
.build();
let prefix = maybe_prefix.unwrap_or(&[]);
let iter = match maybe_start_at {
Some(start_at) =>
TrieDBKeyIterator::new_prefixed_then_seek(&trie, prefix, start_at),
None => TrieDBKeyIterator::new_prefixed(&trie, prefix),
}?;
for x in iter {
let key = x?;
debug_assert!(maybe_prefix
.as_ref()
.map(|prefix| key.starts_with(prefix))
.unwrap_or(true));
if !f(&key) {
break
}
}
Ok(())
})
};
let result = if let Some(child_info) = child_info {
let db = KeySpacedDB::new(self, child_info.keyspace());
iter(&db)
} else {
iter(self)
};
if let Err(e) = result {
debug!(target: "trie", "Error while iterating by prefix: {}", e);
if self.root == Default::default() {
// A special-case for an empty storage root.
return Ok(Default::default())
}
}
fn trie_iter_inner<F: FnMut(Vec<u8>, Vec<u8>) -> bool>(
&self,
root: &H::Out,
prefix: Option<&[u8]>,
mut f: F,
child_info: Option<&ChildInfo>,
start_at: Option<&[u8]>,
allow_missing_nodes: bool,
) -> Result<bool> {
let mut iter = move |db| -> sp_std::result::Result<bool, Box<TrieError<H::Out>>> {
self.with_recorder_and_cache(Some(*root), |recorder, cache| {
let trie = TrieDBBuilder::<H>::new(db, root)
.with_optional_recorder(recorder)
.with_optional_cache(cache)
.build();
let prefix = prefix.unwrap_or(&[]);
let iterator = if let Some(start_at) = start_at {
TrieDBIterator::new_prefixed_then_seek(&trie, prefix, start_at)?
let trie_iter = self
.with_trie_db(root, args.child_info.as_ref(), |db| {
let prefix = args.prefix.as_deref().unwrap_or(&[]);
if let Some(start_at) = args.start_at {
TrieDBRawIterator::new_prefixed_then_seek(db, prefix, &start_at)
} else {
TrieDBIterator::new_prefixed(&trie, prefix)?
};
for x in iterator {
let (key, value) = x?;
debug_assert!(key.starts_with(prefix));
if !f(key, value) {
return Ok(false)
}
TrieDBRawIterator::new_prefixed(db, prefix)
}
Ok(true)
})
};
.map_err(|e| format!("TrieDB iteration error: {}", e))?;
let result = if let Some(child_info) = child_info {
let db = KeySpacedDB::new(self, child_info.keyspace());
iter(&db)
} else {
iter(self)
};
match result {
Ok(completed) => Ok(completed),
Err(e) if matches!(*e, TrieError::IncompleteDatabase(_)) && allow_missing_nodes =>
Ok(false),
Err(e) => Err(format!("TrieDB iteration error: {}", e)),
}
}
/// Execute given closure for all key and values starting with prefix.
pub fn for_key_values_with_prefix<F: FnMut(&[u8], &[u8])>(&self, prefix: &[u8], mut f: F) {
let _ = self.trie_iter_inner(
&self.root,
Some(prefix),
|k, v| {
f(&k, &v);
true
},
None,
None,
false,
);
}
/// Returns all `(key, value)` pairs in the trie.
pub fn pairs(&self) -> Vec<(StorageKey, StorageValue)> {
let collect_all = || -> sp_std::result::Result<_, Box<TrieError<H::Out>>> {
self.with_recorder_and_cache(None, |recorder, cache| {
let trie = TrieDBBuilder::<H>::new(self, self.root())
.with_optional_cache(cache)
.with_optional_recorder(recorder)
.build();
let mut v = Vec::new();
for x in trie.iter()? {
let (key, value) = x?;
v.push((key.to_vec(), value.to_vec()));
}
Ok(v)
})
};
match collect_all() {
Ok(v) => v,
Err(e) => {
debug!(target: "trie", "Error extracting trie values: {}", e);
Vec::new()
},
}
}
/// Returns all keys that start with the given `prefix`.
pub fn keys(&self, prefix: &[u8]) -> Vec<StorageKey> {
let mut keys = Vec::new();
self.for_keys_with_prefix(prefix, |k| keys.push(k.to_vec()));
keys
Ok(RawIter {
stop_on_incomplete_database: args.stop_on_incomplete_database,
child_info: args.child_info,
root,
trie_iter,
state: IterState::Pending,
_phantom: Default::default(),
})
}
/// Return the storage root after applying the given `delta`.