Rework storage iterators (#13284)

* Rework storage iterators

* Make sure storage iteration is also accounted for when benchmarking

* Use `trie-db` from crates.io

* Appease clippy

* Bump `trie-bench` to 0.35.0

* Fix tests' compilation

* Update comment to clarify how `IterArgs::start_at` works

* Add extra tests

* Fix iterators on `Client` so that they behave as before

* Add extra `unwrap`s in tests

* More clippy fixes

* Come on clippy, give me a break already

* Rename `allow_missing` to `stop_on_incomplete_database`

* Add `#[inline]` to `with_recorder_and_cache`

* Use `with_recorder_and_cache` in `with_trie_db`; add doc comment

* Simplify code: use `with_trie_db` in `next_storage_key_from_root`

* Remove `expect`s in the benchmarking CLI

* Add extra doc comments

* Move `RawIter` before `TrieBackendEssence` (no code changes; just cut-paste)

* Remove a TODO in tests

* Update comment for `StorageIterator::was_complete`

* Update `trie-db` to 0.25.1
This commit is contained in:
Koute
2023-02-22 16:49:25 +09:00
committed by GitHub
parent 236bbbd5ef
commit f8e3bdad3d
27 changed files with 1097 additions and 742 deletions
+9 -5
View File
@@ -2902,6 +2902,9 @@ name = "hashbrown"
version = "0.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e"
dependencies = [
"ahash 0.8.3",
]
[[package]]
name = "heck"
@@ -8090,6 +8093,7 @@ dependencies = [
name = "sc-client-db"
version = "0.10.0-dev"
dependencies = [
"array-bytes",
"criterion",
"hash-db",
"kitchensink-runtime",
@@ -11354,9 +11358,9 @@ dependencies = [
[[package]]
name = "trie-bench"
version = "0.34.0"
version = "0.35.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2fbb0a830db7c42ae97ce4e21b30e2cf9dbcc1b4f7853bd1aedad3d806c281d0"
checksum = "22c1d18c423077531e693e87ace54ed9e4af1e4ce0a3ea8c9aa6608741074e2b"
dependencies = [
"criterion",
"hash-db",
@@ -11370,12 +11374,12 @@ dependencies = [
[[package]]
name = "trie-db"
version = "0.24.0"
version = "0.25.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "004e1e8f92535694b4cb1444dc5a8073ecf0815e3357f729638b9f8fc4062908"
checksum = "3390c0409daaa6027d6681393316f4ccd3ff82e1590a1e4725014e3ae2bf1920"
dependencies = [
"hash-db",
"hashbrown 0.12.3",
"hashbrown 0.13.2",
"log",
"rustc-hex",
"smallvec",
+128 -62
View File
@@ -31,14 +31,13 @@ use sp_runtime::{
Justification, Justifications, StateVersion, Storage,
};
use sp_state_machine::{
backend::AsTrieBackend, ChildStorageCollection, IndexOperation, OffchainChangesCollection,
StorageCollection,
backend::AsTrieBackend, ChildStorageCollection, IndexOperation, IterArgs,
OffchainChangesCollection, StorageCollection, StorageIterator,
};
use sp_storage::{ChildInfo, StorageData, StorageKey};
use std::collections::{HashMap, HashSet};
pub use sp_state_machine::{Backend as StateBackend, KeyValueStates};
use std::marker::PhantomData;
/// Extracts the state backend type for the given backend.
pub type StateBackendFor<B, Block> = <B as Backend<Block>>::State;
@@ -303,32 +302,61 @@ pub trait AuxStore {
}
/// An `Iterator` that iterates keys in a given block under a prefix.
pub struct KeyIterator<State, Block> {
pub struct KeysIter<State, Block>
where
State: StateBackend<HashFor<Block>>,
Block: BlockT,
{
inner: <State as StateBackend<HashFor<Block>>>::RawIter,
state: State,
child_storage: Option<ChildInfo>,
prefix: Option<StorageKey>,
current_key: Vec<u8>,
_phantom: PhantomData<Block>,
skip_if_first: Option<StorageKey>,
}
impl<State, Block> KeyIterator<State, Block> {
/// create a KeyIterator instance
pub fn new(state: State, prefix: Option<StorageKey>, current_key: Vec<u8>) -> Self {
Self { state, child_storage: None, prefix, current_key, _phantom: PhantomData }
impl<State, Block> KeysIter<State, Block>
where
State: StateBackend<HashFor<Block>>,
Block: BlockT,
{
/// Create a new iterator over storage keys.
pub fn new(
state: State,
prefix: Option<&StorageKey>,
start_at: Option<&StorageKey>,
) -> Result<Self, State::Error> {
let mut args = IterArgs::default();
args.prefix = prefix.as_ref().map(|prefix| prefix.0.as_slice());
args.start_at = start_at.as_ref().map(|start_at| start_at.0.as_slice());
let start_at = args.start_at;
Ok(Self {
inner: state.raw_iter(args)?,
state,
skip_if_first: start_at.map(|key| StorageKey(key.to_vec())),
})
}
/// Create a `KeyIterator` instance for a child storage.
/// Create a new iterator over a child storage's keys.
pub fn new_child(
state: State,
child_info: ChildInfo,
prefix: Option<StorageKey>,
current_key: Vec<u8>,
) -> Self {
Self { state, child_storage: Some(child_info), prefix, current_key, _phantom: PhantomData }
prefix: Option<&StorageKey>,
start_at: Option<&StorageKey>,
) -> Result<Self, State::Error> {
let mut args = IterArgs::default();
args.prefix = prefix.as_ref().map(|prefix| prefix.0.as_slice());
args.start_at = start_at.as_ref().map(|start_at| start_at.0.as_slice());
args.child_info = Some(child_info);
let start_at = args.start_at;
Ok(Self {
inner: state.raw_iter(args)?,
state,
skip_if_first: start_at.map(|key| StorageKey(key.to_vec())),
})
}
}
impl<State, Block> Iterator for KeyIterator<State, Block>
impl<State, Block> Iterator for KeysIter<State, Block>
where
Block: BlockT,
State: StateBackend<HashFor<Block>>,
@@ -336,25 +364,78 @@ where
type Item = StorageKey;
fn next(&mut self) -> Option<Self::Item> {
let next_key = if let Some(child_info) = self.child_storage.as_ref() {
self.state.next_child_storage_key(child_info, &self.current_key)
} else {
self.state.next_storage_key(&self.current_key)
}
.ok()
.flatten()?;
// this terminates the iterator the first time it fails.
if let Some(ref prefix) = self.prefix {
if !next_key.starts_with(&prefix.0[..]) {
return None
let key = self.inner.next_key(&self.state)?.ok().map(StorageKey)?;
if let Some(skipped_key) = self.skip_if_first.take() {
if key == skipped_key {
return self.next()
}
}
self.current_key = next_key.clone();
Some(StorageKey(next_key))
Some(key)
}
}
/// Provides acess to storage primitives
/// An `Iterator` that iterates keys and values in a given block under a prefix.
pub struct PairsIter<State, Block>
where
State: StateBackend<HashFor<Block>>,
Block: BlockT,
{
inner: <State as StateBackend<HashFor<Block>>>::RawIter,
state: State,
skip_if_first: Option<StorageKey>,
}
impl<State, Block> Iterator for PairsIter<State, Block>
where
Block: BlockT,
State: StateBackend<HashFor<Block>>,
{
type Item = (StorageKey, StorageData);
fn next(&mut self) -> Option<Self::Item> {
let (key, value) = self
.inner
.next_pair(&self.state)?
.ok()
.map(|(key, value)| (StorageKey(key), StorageData(value)))?;
if let Some(skipped_key) = self.skip_if_first.take() {
if key == skipped_key {
return self.next()
}
}
Some((key, value))
}
}
impl<State, Block> PairsIter<State, Block>
where
State: StateBackend<HashFor<Block>>,
Block: BlockT,
{
/// Create a new iterator over storage key and value pairs.
pub fn new(
state: State,
prefix: Option<&StorageKey>,
start_at: Option<&StorageKey>,
) -> Result<Self, State::Error> {
let mut args = IterArgs::default();
args.prefix = prefix.as_ref().map(|prefix| prefix.0.as_slice());
args.start_at = start_at.as_ref().map(|start_at| start_at.0.as_slice());
let start_at = args.start_at;
Ok(Self {
inner: state.raw_iter(args)?,
state,
skip_if_first: start_at.map(|key| StorageKey(key.to_vec())),
})
}
}
/// Provides access to storage primitives
pub trait StorageProvider<Block: BlockT, B: Backend<Block>> {
/// Given a block's `Hash` and a key, return the value under the key in that block.
fn storage(
@@ -363,13 +444,6 @@ pub trait StorageProvider<Block: BlockT, B: Backend<Block>> {
key: &StorageKey,
) -> sp_blockchain::Result<Option<StorageData>>;
/// Given a block's `Hash` and a key prefix, return the matching storage keys in that block.
fn storage_keys(
&self,
hash: Block::Hash,
key_prefix: &StorageKey,
) -> sp_blockchain::Result<Vec<StorageKey>>;
/// Given a block's `Hash` and a key, return the value under the hash in that block.
fn storage_hash(
&self,
@@ -377,22 +451,23 @@ pub trait StorageProvider<Block: BlockT, B: Backend<Block>> {
key: &StorageKey,
) -> sp_blockchain::Result<Option<Block::Hash>>;
/// Given a block's `Hash` and a key prefix, return the matching child storage keys and values
/// in that block.
fn storage_pairs(
&self,
hash: Block::Hash,
key_prefix: &StorageKey,
) -> sp_blockchain::Result<Vec<(StorageKey, StorageData)>>;
/// Given a block's `Hash` and a key prefix, return a `KeyIterator` iterates matching storage
/// Given a block's `Hash` and a key prefix, returns a `KeysIter` iterates matching storage
/// keys in that block.
fn storage_keys_iter(
fn storage_keys(
&self,
hash: Block::Hash,
prefix: Option<&StorageKey>,
start_key: Option<&StorageKey>,
) -> sp_blockchain::Result<KeyIterator<B::State, Block>>;
) -> sp_blockchain::Result<KeysIter<B::State, Block>>;
/// Given a block's `Hash` and a key prefix, returns an iterator over the storage keys and
/// values in that block.
fn storage_pairs(
&self,
hash: <Block as BlockT>::Hash,
prefix: Option<&StorageKey>,
start_key: Option<&StorageKey>,
) -> sp_blockchain::Result<PairsIter<B::State, Block>>;
/// Given a block's `Hash`, a key and a child storage key, return the value under the key in
/// that block.
@@ -403,24 +478,15 @@ pub trait StorageProvider<Block: BlockT, B: Backend<Block>> {
key: &StorageKey,
) -> sp_blockchain::Result<Option<StorageData>>;
/// Given a block's `Hash`, a key prefix, and a child storage key, return the matching child
/// storage keys.
fn child_storage_keys(
&self,
hash: Block::Hash,
child_info: &ChildInfo,
key_prefix: &StorageKey,
) -> sp_blockchain::Result<Vec<StorageKey>>;
/// Given a block's `Hash` and a key `prefix` and a child storage key,
/// return a `KeyIterator` that iterates matching storage keys in that block.
fn child_storage_keys_iter(
/// returns a `KeysIter` that iterates matching storage keys in that block.
fn child_storage_keys(
&self,
hash: Block::Hash,
child_info: ChildInfo,
prefix: Option<&StorageKey>,
start_key: Option<&StorageKey>,
) -> sp_blockchain::Result<KeyIterator<B::State, Block>>;
) -> sp_blockchain::Result<KeysIter<B::State, Block>>;
/// Given a block's `Hash`, a key and a child storage key, return the hash under the key in that
/// block.
+1
View File
@@ -44,6 +44,7 @@ quickcheck = { version = "1.0.3", default-features = false }
kitchensink-runtime = { path = "../../bin/node/runtime" }
sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" }
substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" }
array-bytes = "4.1"
[features]
default = []
+134 -100
View File
@@ -22,6 +22,7 @@ use crate::{DbState, DbStateBuilder};
use hash_db::{Hasher, Prefix};
use kvdb::{DBTransaction, KeyValueDB};
use linked_hash_map::LinkedHashMap;
use parking_lot::Mutex;
use sp_core::{
hexdisplay::HexDisplay,
storage::{ChildInfo, TrackedStorageKey},
@@ -31,7 +32,8 @@ use sp_runtime::{
StateVersion, Storage,
};
use sp_state_machine::{
backend::Backend as StateBackend, ChildStorageCollection, DBValue, StorageCollection,
backend::Backend as StateBackend, ChildStorageCollection, DBValue, IterArgs, StorageCollection,
StorageIterator, StorageKey, StorageValue,
};
use sp_trie::{
cache::{CacheSize, SharedTrieCache},
@@ -59,6 +61,19 @@ impl<Block: BlockT> sp_state_machine::Storage<HashFor<Block>> for StorageDb<Bloc
}
}
struct KeyTracker {
enable_tracking: bool,
/// Key tracker for keys in the main trie.
/// We track the total number of reads and writes to these keys,
/// not de-duplicated for repeats.
main_keys: LinkedHashMap<Vec<u8>, TrackedStorageKey>,
/// Key tracker for keys in a child trie.
/// Child trie are identified by their storage key (i.e. `ChildInfo::storage_key()`)
/// We track the total number of reads and writes to these keys,
/// not de-duplicated for repeats.
child_keys: LinkedHashMap<Vec<u8>, LinkedHashMap<Vec<u8>, TrackedStorageKey>>,
}
/// State that manages the backend database reference. Allows runtime to control the database.
pub struct BenchmarkingState<B: BlockT> {
root: Cell<B::Hash>,
@@ -67,22 +82,52 @@ pub struct BenchmarkingState<B: BlockT> {
db: Cell<Option<Arc<dyn KeyValueDB>>>,
genesis: HashMap<Vec<u8>, (Vec<u8>, i32)>,
record: Cell<Vec<Vec<u8>>>,
/// Key tracker for keys in the main trie.
/// We track the total number of reads and writes to these keys,
/// not de-duplicated for repeats.
main_key_tracker: RefCell<LinkedHashMap<Vec<u8>, TrackedStorageKey>>,
/// Key tracker for keys in a child trie.
/// Child trie are identified by their storage key (i.e. `ChildInfo::storage_key()`)
/// We track the total number of reads and writes to these keys,
/// not de-duplicated for repeats.
child_key_tracker: RefCell<LinkedHashMap<Vec<u8>, LinkedHashMap<Vec<u8>, TrackedStorageKey>>>,
key_tracker: Arc<Mutex<KeyTracker>>,
whitelist: RefCell<Vec<TrackedStorageKey>>,
proof_recorder: Option<sp_trie::recorder::Recorder<HashFor<B>>>,
proof_recorder_root: Cell<B::Hash>,
enable_tracking: bool,
shared_trie_cache: SharedTrieCache<HashFor<B>>,
}
/// A raw iterator over the `BenchmarkingState`.
pub struct RawIter<B: BlockT> {
inner: <DbState<B> as StateBackend<HashFor<B>>>::RawIter,
child_trie: Option<Vec<u8>>,
key_tracker: Arc<Mutex<KeyTracker>>,
}
impl<B: BlockT> StorageIterator<HashFor<B>> for RawIter<B> {
type Backend = BenchmarkingState<B>;
type Error = String;
fn next_key(&mut self, backend: &Self::Backend) -> Option<Result<StorageKey, Self::Error>> {
match self.inner.next_key(backend.state.borrow().as_ref()?) {
Some(Ok(key)) => {
self.key_tracker.lock().add_read_key(self.child_trie.as_deref(), &key);
Some(Ok(key))
},
result => result,
}
}
fn next_pair(
&mut self,
backend: &Self::Backend,
) -> Option<Result<(StorageKey, StorageValue), Self::Error>> {
match self.inner.next_pair(backend.state.borrow().as_ref()?) {
Some(Ok((key, value))) => {
self.key_tracker.lock().add_read_key(self.child_trie.as_deref(), &key);
Some(Ok((key, value)))
},
result => result,
}
}
fn was_complete(&self) -> bool {
self.inner.was_complete()
}
}
impl<B: BlockT> BenchmarkingState<B> {
/// Create a new instance that creates a database in a temporary dir.
pub fn new(
@@ -103,12 +148,14 @@ impl<B: BlockT> BenchmarkingState<B> {
genesis: Default::default(),
genesis_root: Default::default(),
record: Default::default(),
main_key_tracker: Default::default(),
child_key_tracker: Default::default(),
key_tracker: Arc::new(Mutex::new(KeyTracker {
main_keys: Default::default(),
child_keys: Default::default(),
enable_tracking,
})),
whitelist: Default::default(),
proof_recorder: record_proof.then(Default::default),
proof_recorder_root: Cell::new(root),
enable_tracking,
// Enable the cache, but do not sync anything to the shared state.
shared_trie_cache: SharedTrieCache::new(CacheSize::new(0)),
};
@@ -123,7 +170,7 @@ impl<B: BlockT> BenchmarkingState<B> {
)
});
let (root, transaction): (B::Hash, _) =
state.state.borrow_mut().as_mut().unwrap().full_storage_root(
state.state.borrow().as_ref().unwrap().full_storage_root(
genesis.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))),
child_delta,
state_version,
@@ -157,36 +204,51 @@ impl<B: BlockT> BenchmarkingState<B> {
}
fn add_whitelist_to_tracker(&self) {
let mut main_key_tracker = self.main_key_tracker.borrow_mut();
let whitelist = self.whitelist.borrow();
whitelist.iter().for_each(|key| {
let mut whitelisted = TrackedStorageKey::new(key.key.clone());
whitelisted.whitelist();
main_key_tracker.insert(key.key.clone(), whitelisted);
});
self.key_tracker.lock().add_whitelist(&self.whitelist.borrow());
}
fn wipe_tracker(&self) {
*self.main_key_tracker.borrow_mut() = LinkedHashMap::new();
*self.child_key_tracker.borrow_mut() = LinkedHashMap::new();
self.add_whitelist_to_tracker();
let mut key_tracker = self.key_tracker.lock();
key_tracker.main_keys = LinkedHashMap::new();
key_tracker.child_keys = LinkedHashMap::new();
key_tracker.add_whitelist(&self.whitelist.borrow());
}
fn add_read_key(&self, childtrie: Option<&[u8]>, key: &[u8]) {
self.key_tracker.lock().add_read_key(childtrie, key);
}
fn add_write_key(&self, childtrie: Option<&[u8]>, key: &[u8]) {
self.key_tracker.lock().add_write_key(childtrie, key);
}
fn all_trackers(&self) -> Vec<TrackedStorageKey> {
self.key_tracker.lock().all_trackers()
}
}
impl KeyTracker {
fn add_whitelist(&mut self, whitelist: &[TrackedStorageKey]) {
whitelist.iter().for_each(|key| {
let mut whitelisted = TrackedStorageKey::new(key.key.clone());
whitelisted.whitelist();
self.main_keys.insert(key.key.clone(), whitelisted);
});
}
// Childtrie is identified by its storage key (i.e. `ChildInfo::storage_key`)
fn add_read_key(&self, childtrie: Option<&[u8]>, key: &[u8]) {
fn add_read_key(&mut self, childtrie: Option<&[u8]>, key: &[u8]) {
if !self.enable_tracking {
return
}
let mut child_key_tracker = self.child_key_tracker.borrow_mut();
let mut main_key_tracker = self.main_key_tracker.borrow_mut();
let child_key_tracker = &mut self.child_keys;
let main_key_tracker = &mut self.main_keys;
let key_tracker = if let Some(childtrie) = childtrie {
child_key_tracker.entry(childtrie.to_vec()).or_insert_with(LinkedHashMap::new)
} else {
&mut main_key_tracker
main_key_tracker
};
let should_log = match key_tracker.get_mut(key) {
@@ -216,18 +278,18 @@ impl<B: BlockT> BenchmarkingState<B> {
}
// Childtrie is identified by its storage key (i.e. `ChildInfo::storage_key`)
fn add_write_key(&self, childtrie: Option<&[u8]>, key: &[u8]) {
fn add_write_key(&mut self, childtrie: Option<&[u8]>, key: &[u8]) {
if !self.enable_tracking {
return
}
let mut child_key_tracker = self.child_key_tracker.borrow_mut();
let mut main_key_tracker = self.main_key_tracker.borrow_mut();
let child_key_tracker = &mut self.child_keys;
let main_key_tracker = &mut self.main_keys;
let key_tracker = if let Some(childtrie) = childtrie {
child_key_tracker.entry(childtrie.to_vec()).or_insert_with(LinkedHashMap::new)
} else {
&mut main_key_tracker
main_key_tracker
};
// If we have written to the key, we also consider that we have read from it.
@@ -261,11 +323,11 @@ impl<B: BlockT> BenchmarkingState<B> {
fn all_trackers(&self) -> Vec<TrackedStorageKey> {
let mut all_trackers = Vec::new();
self.main_key_tracker.borrow().iter().for_each(|(_, tracker)| {
self.main_keys.iter().for_each(|(_, tracker)| {
all_trackers.push(tracker.clone());
});
self.child_key_tracker.borrow().iter().for_each(|(_, child_tracker)| {
self.child_keys.iter().for_each(|(_, child_tracker)| {
child_tracker.iter().for_each(|(_, tracker)| {
all_trackers.push(tracker.clone());
});
@@ -283,6 +345,7 @@ impl<B: BlockT> StateBackend<HashFor<B>> for BenchmarkingState<B> {
type Error = <DbState<B> as StateBackend<HashFor<B>>>::Error;
type Transaction = <DbState<B> as StateBackend<HashFor<B>>>::Transaction;
type TrieBackendStorage = <DbState<B> as StateBackend<HashFor<B>>>::TrieBackendStorage;
type RawIter = RawIter<B>;
fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
self.add_read_key(None, key);
@@ -356,58 +419,6 @@ impl<B: BlockT> StateBackend<HashFor<B>> for BenchmarkingState<B> {
.next_child_storage_key(child_info, key)
}
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
if let Some(ref state) = *self.state.borrow() {
state.for_keys_with_prefix(prefix, f)
}
}
fn for_key_values_with_prefix<F: FnMut(&[u8], &[u8])>(&self, prefix: &[u8], f: F) {
if let Some(ref state) = *self.state.borrow() {
state.for_key_values_with_prefix(prefix, f)
}
}
fn apply_to_key_values_while<F: FnMut(Vec<u8>, Vec<u8>) -> bool>(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
allow_missing: bool,
) -> Result<bool, Self::Error> {
self.state.borrow().as_ref().ok_or_else(state_err)?.apply_to_key_values_while(
child_info,
prefix,
start_at,
f,
allow_missing,
)
}
fn apply_to_keys_while<F: FnMut(&[u8]) -> bool>(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
) {
if let Some(ref state) = *self.state.borrow() {
state.apply_to_keys_while(child_info, prefix, start_at, f)
}
}
fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
&self,
child_info: &ChildInfo,
prefix: &[u8],
f: F,
) {
if let Some(ref state) = *self.state.borrow() {
state.for_child_keys_with_prefix(child_info, prefix, f)
}
}
fn storage_root<'a>(
&self,
delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
@@ -437,19 +448,19 @@ impl<B: BlockT> StateBackend<HashFor<B>> for BenchmarkingState<B> {
.map_or(Default::default(), |s| s.child_storage_root(child_info, delta, state_version))
}
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
self.state.borrow().as_ref().map_or(Default::default(), |s| s.pairs())
}
fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> {
self.state.borrow().as_ref().map_or(Default::default(), |s| s.keys(prefix))
}
fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec<Vec<u8>> {
fn raw_iter(&self, args: IterArgs) -> Result<Self::RawIter, Self::Error> {
let child_trie =
args.child_info.as_ref().map(|child_info| child_info.storage_key().to_vec());
self.state
.borrow()
.as_ref()
.map_or(Default::default(), |s| s.child_keys(child_info, prefix))
.map(|s| s.raw_iter(args))
.unwrap_or(Ok(Default::default()))
.map(|raw_iter| RawIter {
inner: raw_iter,
key_tracker: self.key_tracker.clone(),
child_trie,
})
}
fn commit(
@@ -587,7 +598,7 @@ impl<B: BlockT> StateBackend<HashFor<B>> for BenchmarkingState<B> {
}
fn register_overlay_stats(&self, stats: &sp_state_machine::StateMachineStats) {
self.state.borrow_mut().as_mut().map(|s| s.register_overlay_stats(stats));
self.state.borrow().as_ref().map(|s| s.register_overlay_stats(stats));
}
fn usage_info(&self) -> sp_state_machine::UsageInfo {
@@ -639,6 +650,29 @@ mod test {
use crate::bench::BenchmarkingState;
use sp_state_machine::backend::Backend as _;
fn hex(hex: &str) -> Vec<u8> {
array_bytes::hex2bytes(hex).unwrap()
}
#[test]
fn iteration_is_also_counted_in_rw_counts() {
let storage = sp_runtime::Storage {
top: vec![(
hex("ce6e1397e668c7fcf47744350dc59688455a2c2dbd2e2a649df4e55d93cd7158"),
hex("0102030405060708"),
)]
.into_iter()
.collect(),
..sp_runtime::Storage::default()
};
let bench_state =
BenchmarkingState::<crate::tests::Block>::new(storage, None, false, true).unwrap();
assert_eq!(bench_state.read_write_count(), (0, 0, 0, 0));
assert_eq!(bench_state.keys(Default::default()).unwrap().count(), 1);
assert_eq!(bench_state.read_write_count(), (1, 0, 0, 0));
}
#[test]
fn read_to_main_and_child_tries() {
let bench_state =
+31 -51
View File
@@ -86,8 +86,9 @@ use sp_runtime::{
};
use sp_state_machine::{
backend::{AsTrieBackend, Backend as StateBackend},
ChildStorageCollection, DBValue, IndexOperation, OffchainChangesCollection, StateMachineStats,
StorageCollection, UsageInfo as StateUsageInfo,
ChildStorageCollection, DBValue, IndexOperation, IterArgs, OffchainChangesCollection,
StateMachineStats, StorageCollection, StorageIterator, StorageKey, StorageValue,
UsageInfo as StateUsageInfo,
};
use sp_trie::{cache::SharedTrieCache, prefixed_key, MemoryDB, PrefixedMemoryDB};
@@ -159,10 +160,36 @@ impl<Block: BlockT> std::fmt::Debug for RefTrackingState<Block> {
}
}
/// A raw iterator over the `RefTrackingState`.
pub struct RawIter<B: BlockT> {
inner: <DbState<B> as StateBackend<HashFor<B>>>::RawIter,
}
impl<B: BlockT> StorageIterator<HashFor<B>> for RawIter<B> {
type Backend = RefTrackingState<B>;
type Error = <DbState<B> as StateBackend<HashFor<B>>>::Error;
fn next_key(&mut self, backend: &Self::Backend) -> Option<Result<StorageKey, Self::Error>> {
self.inner.next_key(&backend.state)
}
fn next_pair(
&mut self,
backend: &Self::Backend,
) -> Option<Result<(StorageKey, StorageValue), Self::Error>> {
self.inner.next_pair(&backend.state)
}
fn was_complete(&self) -> bool {
self.inner.was_complete()
}
}
impl<B: BlockT> StateBackend<HashFor<B>> for RefTrackingState<B> {
type Error = <DbState<B> as StateBackend<HashFor<B>>>::Error;
type Transaction = <DbState<B> as StateBackend<HashFor<B>>>::Transaction;
type TrieBackendStorage = <DbState<B> as StateBackend<HashFor<B>>>::TrieBackendStorage;
type RawIter = RawIter<B>;
fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
self.state.storage(key)
@@ -212,45 +239,6 @@ impl<B: BlockT> StateBackend<HashFor<B>> for RefTrackingState<B> {
self.state.next_child_storage_key(child_info, key)
}
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
self.state.for_keys_with_prefix(prefix, f)
}
fn for_key_values_with_prefix<F: FnMut(&[u8], &[u8])>(&self, prefix: &[u8], f: F) {
self.state.for_key_values_with_prefix(prefix, f)
}
fn apply_to_key_values_while<F: FnMut(Vec<u8>, Vec<u8>) -> bool>(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
allow_missing: bool,
) -> Result<bool, Self::Error> {
self.state
.apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing)
}
fn apply_to_keys_while<F: FnMut(&[u8]) -> bool>(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
) {
self.state.apply_to_keys_while(child_info, prefix, start_at, f)
}
fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
&self,
child_info: &ChildInfo,
prefix: &[u8],
f: F,
) {
self.state.for_child_keys_with_prefix(child_info, prefix, f)
}
fn storage_root<'a>(
&self,
delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
@@ -274,16 +262,8 @@ impl<B: BlockT> StateBackend<HashFor<B>> for RefTrackingState<B> {
self.state.child_storage_root(child_info, delta, state_version)
}
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
self.state.pairs()
}
fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> {
self.state.keys(prefix)
}
fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec<Vec<u8>> {
self.state.child_keys(child_info, prefix)
fn raw_iter(&self, args: IterArgs) -> Result<Self::RawIter, Self::Error> {
self.state.raw_iter(args).map(|inner| RawIter { inner })
}
fn register_overlay_stats(&self, stats: &StateMachineStats) {
+36 -50
View File
@@ -26,7 +26,7 @@ use sp_runtime::{
};
use sp_state_machine::{
backend::{AsTrieBackend, Backend as StateBackend},
TrieBackend,
IterArgs, StorageIterator, StorageKey, StorageValue, TrieBackend,
};
use std::sync::Arc;
@@ -73,10 +73,43 @@ impl<S: StateBackend<HashFor<B>>, B: BlockT> RecordStatsState<S, B> {
}
}
pub struct RawIter<S, B>
where
S: StateBackend<HashFor<B>>,
B: BlockT,
{
inner: <S as StateBackend<HashFor<B>>>::RawIter,
}
impl<S, B> StorageIterator<HashFor<B>> for RawIter<S, B>
where
S: StateBackend<HashFor<B>>,
B: BlockT,
{
type Backend = RecordStatsState<S, B>;
type Error = S::Error;
fn next_key(&mut self, backend: &Self::Backend) -> Option<Result<StorageKey, Self::Error>> {
self.inner.next_key(&backend.state)
}
fn next_pair(
&mut self,
backend: &Self::Backend,
) -> Option<Result<(StorageKey, StorageValue), Self::Error>> {
self.inner.next_pair(&backend.state)
}
fn was_complete(&self) -> bool {
self.inner.was_complete()
}
}
impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>> for RecordStatsState<S, B> {
type Error = S::Error;
type Transaction = S::Transaction;
type TrieBackendStorage = S::TrieBackendStorage;
type RawIter = RawIter<S, B>;
fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
let value = self.state.storage(key)?;
@@ -122,28 +155,6 @@ impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>> for Record
self.state.exists_child_storage(child_info, key)
}
fn apply_to_key_values_while<F: FnMut(Vec<u8>, Vec<u8>) -> bool>(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
allow_missing: bool,
) -> Result<bool, Self::Error> {
self.state
.apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing)
}
fn apply_to_keys_while<F: FnMut(&[u8]) -> bool>(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
) {
self.state.apply_to_keys_while(child_info, prefix, start_at, f)
}
fn next_storage_key(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
self.state.next_storage_key(key)
}
@@ -156,23 +167,6 @@ impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>> for Record
self.state.next_child_storage_key(child_info, key)
}
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
self.state.for_keys_with_prefix(prefix, f)
}
fn for_key_values_with_prefix<F: FnMut(&[u8], &[u8])>(&self, prefix: &[u8], f: F) {
self.state.for_key_values_with_prefix(prefix, f)
}
fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
&self,
child_info: &ChildInfo,
prefix: &[u8],
f: F,
) {
self.state.for_child_keys_with_prefix(child_info, prefix, f)
}
fn storage_root<'a>(
&self,
delta: impl Iterator<Item = (&'a [u8], Option<&'a [u8]>)>,
@@ -196,16 +190,8 @@ impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>> for Record
self.state.child_storage_root(child_info, delta, state_version)
}
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
self.state.pairs()
}
fn keys(&self, prefix: &[u8]) -> Vec<Vec<u8>> {
self.state.keys(prefix)
}
fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec<Vec<u8>> {
self.state.child_keys(child_info, prefix)
fn raw_iter(&self, args: IterArgs) -> Result<Self::RawIter, Self::Error> {
self.state.raw_iter(args).map(|inner| RawIter { inner })
}
fn register_overlay_stats(&self, stats: &sp_state_machine::StateMachineStats) {
+14 -8
View File
@@ -213,23 +213,29 @@ where
.map_err(client_err)
}
// TODO: This is horribly broken; either remove it, or make it streaming.
fn storage_keys(
&self,
block: Option<Block::Hash>,
prefix: StorageKey,
) -> std::result::Result<Vec<StorageKey>, Error> {
// TODO: Remove the `.collect`.
self.block_or_best(block)
.and_then(|block| self.client.storage_keys(block, &prefix))
.and_then(|block| self.client.storage_keys(block, Some(&prefix), None))
.map(|iter| iter.collect())
.map_err(client_err)
}
// TODO: This is horribly broken; either remove it, or make it streaming.
fn storage_pairs(
&self,
block: Option<Block::Hash>,
prefix: StorageKey,
) -> std::result::Result<Vec<(StorageKey, StorageData)>, Error> {
// TODO: Remove the `.collect`.
self.block_or_best(block)
.and_then(|block| self.client.storage_pairs(block, &prefix))
.and_then(|block| self.client.storage_pairs(block, Some(&prefix), None))
.map(|iter| iter.collect())
.map_err(client_err)
}
@@ -241,9 +247,7 @@ where
start_key: Option<StorageKey>,
) -> std::result::Result<Vec<StorageKey>, Error> {
self.block_or_best(block)
.and_then(|block| {
self.client.storage_keys_iter(block, prefix.as_ref(), start_key.as_ref())
})
.and_then(|block| self.client.storage_keys(block, prefix.as_ref(), start_key.as_ref()))
.map(|iter| iter.take(count as usize).collect())
.map_err(client_err)
}
@@ -284,7 +288,7 @@ where
}
// The key doesn't point to anything, so it's probably a prefix.
let iter = match client.storage_keys_iter(block, Some(&key), None).map_err(client_err) {
let iter = match client.storage_keys(block, Some(&key), None).map_err(client_err) {
Ok(iter) => iter,
Err(e) => return Ok(Err(e)),
};
@@ -531,6 +535,7 @@ where
storage_key: PrefixedStorageKey,
prefix: StorageKey,
) -> std::result::Result<Vec<StorageKey>, Error> {
// TODO: Remove the `.collect`.
self.block_or_best(block)
.and_then(|block| {
let child_info = match ChildType::from_prefixed_key(&storage_key) {
@@ -538,8 +543,9 @@ where
ChildInfo::new_default(storage_key),
None => return Err(sp_blockchain::Error::InvalidChildStorageKey),
};
self.client.child_storage_keys(block, &child_info, &prefix)
self.client.child_storage_keys(block, child_info, Some(&prefix), None)
})
.map(|iter| iter.collect())
.map_err(client_err)
}
@@ -558,7 +564,7 @@ where
ChildInfo::new_default(storage_key),
None => return Err(sp_blockchain::Error::InvalidChildStorageKey),
};
self.client.child_storage_keys_iter(
self.client.child_storage_keys(
block,
child_info,
prefix.as_ref(),
@@ -21,7 +21,10 @@ use sc_client_api::{StorageProvider, UsageProvider};
use sp_core::storage::{well_known_keys, ChildInfo, Storage, StorageChild, StorageKey, StorageMap};
use sp_runtime::traits::Block as BlockT;
use std::{collections::HashMap, sync::Arc};
use std::{
collections::{BTreeMap, HashMap},
sync::Arc,
};
/// Export the raw state at the given `block`. If `block` is `None`, the
/// best block will be used.
@@ -31,35 +34,30 @@ where
B: BlockT,
BA: sc_client_api::backend::Backend<B>,
{
let empty_key = StorageKey(Vec::new());
let mut top_storage = client.storage_pairs(hash, &empty_key)?;
let mut top = BTreeMap::new();
let mut children_default = HashMap::new();
// Remove all default child storage roots from the top storage and collect the child storage
// pairs.
while let Some(pos) = top_storage
.iter()
.position(|(k, _)| k.0.starts_with(well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX))
{
let (key, _) = top_storage.swap_remove(pos);
let key =
StorageKey(key.0[well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX.len()..].to_vec());
let child_info = ChildInfo::new_default(&key.0);
let keys = client.child_storage_keys(hash, &child_info, &empty_key)?;
let mut pairs = StorageMap::new();
keys.into_iter().try_for_each(|k| {
if let Some(value) = client.child_storage(hash, &child_info, &k)? {
pairs.insert(k.0, value.0);
for (key, value) in client.storage_pairs(hash, None, None)? {
// Remove all default child storage roots from the top storage and collect the child storage
// pairs.
if key.0.starts_with(well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX) {
let child_root_key = StorageKey(
key.0[well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX.len()..].to_vec(),
);
let child_info = ChildInfo::new_default(&child_root_key.0);
let mut pairs = StorageMap::new();
for child_key in client.child_storage_keys(hash, child_info.clone(), None, None)? {
if let Some(child_value) = client.child_storage(hash, &child_info, &child_key)? {
pairs.insert(child_key.0, child_value.0);
}
}
Ok::<_, Error>(())
})?;
children_default.insert(child_root_key.0, StorageChild { child_info, data: pairs });
continue
}
children_default.insert(key.0, StorageChild { child_info, data: pairs });
top.insert(key.0, value.0);
}
let top = top_storage.into_iter().map(|(k, v)| (k.0, v.0)).collect();
Ok(Storage { top, children_default })
}
+20 -50
View File
@@ -40,8 +40,8 @@ use sc_client_api::{
},
execution_extensions::ExecutionExtensions,
notifications::{StorageEventStream, StorageNotifications},
CallExecutor, ExecutorProvider, KeyIterator, OnFinalityAction, OnImportAction, ProofProvider,
UsageProvider,
CallExecutor, ExecutorProvider, KeysIter, OnFinalityAction, OnImportAction, PairsIter,
ProofProvider, UsageProvider,
};
use sc_consensus::{
BlockCheckParams, BlockImportParams, ForkChoiceStrategy, ImportResult, StateAction,
@@ -1462,52 +1462,37 @@ where
Block: BlockT,
{
fn storage_keys(
&self,
hash: Block::Hash,
key_prefix: &StorageKey,
) -> sp_blockchain::Result<Vec<StorageKey>> {
let keys = self.state_at(hash)?.keys(&key_prefix.0).into_iter().map(StorageKey).collect();
Ok(keys)
}
fn storage_pairs(
&self,
hash: <Block as BlockT>::Hash,
key_prefix: &StorageKey,
) -> sp_blockchain::Result<Vec<(StorageKey, StorageData)>> {
let state = self.state_at(hash)?;
let keys = state
.keys(&key_prefix.0)
.into_iter()
.map(|k| {
let d = state.storage(&k).ok().flatten().unwrap_or_default();
(StorageKey(k), StorageData(d))
})
.collect();
Ok(keys)
}
fn storage_keys_iter(
&self,
hash: <Block as BlockT>::Hash,
prefix: Option<&StorageKey>,
start_key: Option<&StorageKey>,
) -> sp_blockchain::Result<KeyIterator<B::State, Block>> {
) -> sp_blockchain::Result<KeysIter<B::State, Block>> {
let state = self.state_at(hash)?;
let start_key = start_key.or(prefix).map(|key| key.0.clone()).unwrap_or_else(Vec::new);
Ok(KeyIterator::new(state, prefix.cloned(), start_key))
KeysIter::new(state, prefix, start_key)
.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
}
fn child_storage_keys_iter(
fn child_storage_keys(
&self,
hash: <Block as BlockT>::Hash,
child_info: ChildInfo,
prefix: Option<&StorageKey>,
start_key: Option<&StorageKey>,
) -> sp_blockchain::Result<KeyIterator<B::State, Block>> {
) -> sp_blockchain::Result<KeysIter<B::State, Block>> {
let state = self.state_at(hash)?;
let start_key = start_key.or(prefix).map(|key| key.0.clone()).unwrap_or_else(Vec::new);
Ok(KeyIterator::new_child(state, child_info, prefix.cloned(), start_key))
KeysIter::new_child(state, child_info, prefix, start_key)
.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
}
fn storage_pairs(
&self,
hash: <Block as BlockT>::Hash,
prefix: Option<&StorageKey>,
start_key: Option<&StorageKey>,
) -> sp_blockchain::Result<PairsIter<B::State, Block>> {
let state = self.state_at(hash)?;
PairsIter::new(state, prefix, start_key)
.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
}
fn storage(
@@ -1532,21 +1517,6 @@ where
.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))
}
fn child_storage_keys(
&self,
hash: <Block as BlockT>::Hash,
child_info: &ChildInfo,
key_prefix: &StorageKey,
) -> sp_blockchain::Result<Vec<StorageKey>> {
let keys = self
.state_at(hash)?
.child_keys(child_info, &key_prefix.0)
.into_iter()
.map(StorageKey)
.collect();
Ok(keys)
}
fn child_storage(
&self,
hash: <Block as BlockT>::Hash,
+84 -18
View File
@@ -341,7 +341,20 @@ fn block_builder_works_with_transactions() {
.expect("block 1 was just imported. qed");
assert_eq!(client.chain_info().best_number, 1);
assert_ne!(client.state_at(hash1).unwrap().pairs(), client.state_at(hash0).unwrap().pairs());
assert_ne!(
client
.state_at(hash1)
.unwrap()
.pairs(Default::default())
.unwrap()
.collect::<Vec<_>>(),
client
.state_at(hash0)
.unwrap()
.pairs(Default::default())
.unwrap()
.collect::<Vec<_>>()
);
assert_eq!(
client
.runtime_api()
@@ -394,8 +407,18 @@ fn block_builder_does_not_include_invalid() {
assert_eq!(client.chain_info().best_number, 1);
assert_ne!(
client.state_at(hashof1).unwrap().pairs(),
client.state_at(hashof0).unwrap().pairs()
client
.state_at(hashof1)
.unwrap()
.pairs(Default::default())
.unwrap()
.collect::<Vec<_>>(),
client
.state_at(hashof0)
.unwrap()
.pairs(Default::default())
.unwrap()
.collect::<Vec<_>>()
);
assert_eq!(client.body(hashof1).unwrap().unwrap().len(), 1)
}
@@ -1688,7 +1711,7 @@ fn returns_status_for_pruned_blocks() {
}
#[test]
fn storage_keys_iter_prefix_and_start_key_works() {
fn storage_keys_prefix_and_start_key_works() {
let child_info = ChildInfo::new_default(b"child");
let client = TestClientBuilder::new()
.add_extra_child_storage(&child_info, b"first".to_vec(), vec![0u8; 32])
@@ -1703,7 +1726,7 @@ fn storage_keys_iter_prefix_and_start_key_works() {
let child_prefix = StorageKey(b"sec".to_vec());
let res: Vec<_> = client
.storage_keys_iter(block_hash, Some(&prefix), None)
.storage_keys(block_hash, Some(&prefix), None)
.unwrap()
.map(|x| x.0)
.collect();
@@ -1717,7 +1740,7 @@ fn storage_keys_iter_prefix_and_start_key_works() {
);
let res: Vec<_> = client
.storage_keys_iter(
.storage_keys(
block_hash,
Some(&prefix),
Some(&StorageKey(array_bytes::hex2bytes_unchecked("3a636f6465"))),
@@ -1728,7 +1751,7 @@ fn storage_keys_iter_prefix_and_start_key_works() {
assert_eq!(res, [array_bytes::hex2bytes_unchecked("3a686561707061676573")]);
let res: Vec<_> = client
.storage_keys_iter(
.storage_keys(
block_hash,
Some(&prefix),
Some(&StorageKey(array_bytes::hex2bytes_unchecked("3a686561707061676573"))),
@@ -1739,19 +1762,14 @@ fn storage_keys_iter_prefix_and_start_key_works() {
assert_eq!(res, Vec::<Vec<u8>>::new());
let res: Vec<_> = client
.child_storage_keys_iter(block_hash, child_info.clone(), Some(&child_prefix), None)
.child_storage_keys(block_hash, child_info.clone(), Some(&child_prefix), None)
.unwrap()
.map(|x| x.0)
.collect();
assert_eq!(res, [b"second".to_vec()]);
let res: Vec<_> = client
.child_storage_keys_iter(
block_hash,
child_info,
None,
Some(&StorageKey(b"second".to_vec())),
)
.child_storage_keys(block_hash, child_info, None, Some(&StorageKey(b"second".to_vec())))
.unwrap()
.map(|x| x.0)
.collect();
@@ -1759,7 +1777,7 @@ fn storage_keys_iter_prefix_and_start_key_works() {
}
#[test]
fn storage_keys_iter_works() {
fn storage_keys_works() {
let client = substrate_test_runtime_client::new();
let block_hash = client.info().best_hash;
@@ -1767,7 +1785,7 @@ fn storage_keys_iter_works() {
let prefix = StorageKey(array_bytes::hex2bytes_unchecked(""));
let res: Vec<_> = client
.storage_keys_iter(block_hash, Some(&prefix), None)
.storage_keys(block_hash, Some(&prefix), None)
.unwrap()
.take(9)
.map(|x| array_bytes::bytes2hex("", &x.0))
@@ -1787,8 +1805,56 @@ fn storage_keys_iter_works() {
]
);
// Starting at an empty key nothing gets skipped.
let res: Vec<_> = client
.storage_keys_iter(
.storage_keys(block_hash, Some(&prefix), Some(&StorageKey("".into())))
.unwrap()
.take(9)
.map(|x| array_bytes::bytes2hex("", &x.0))
.collect();
assert_eq!(
res,
[
"00c232cf4e70a5e343317016dc805bf80a6a8cd8ad39958d56f99891b07851e0",
"085b2407916e53a86efeb8b72dbe338c4b341dab135252f96b6ed8022209b6cb",
"0befda6e1ca4ef40219d588a727f1271",
"1a560ecfd2a62c2b8521ef149d0804eb621050e3988ed97dca55f0d7c3e6aa34",
"1d66850d32002979d67dd29dc583af5b2ae2a1f71c1f35ad90fff122be7a3824",
"237498b98d8803334286e9f0483ef513098dd3c1c22ca21c4dc155b4ef6cc204",
"26aa394eea5630e07c48ae0c9558cef75e0621c4869aa60c02be9adcc98a0d1d",
"29b9db10ec5bf7907d8f74b5e60aa8140c4fbdd8127a1ee5600cb98e5ec01729",
"3a636f6465",
]
);
// Starting at an incomplete key nothing gets skipped.
let res: Vec<_> = client
.storage_keys(
block_hash,
Some(&prefix),
Some(&StorageKey(array_bytes::hex2bytes_unchecked("3a636f64"))),
)
.unwrap()
.take(8)
.map(|x| array_bytes::bytes2hex("", &x.0))
.collect();
assert_eq!(
res,
[
"3a636f6465",
"3a686561707061676573",
"52008686cc27f6e5ed83a216929942f8bcd32a396f09664a5698f81371934b56",
"5348d72ac6cc66e5d8cbecc27b0e0677503b845fe2382d819f83001781788fd5",
"5c2d5fda66373dabf970e4fb13d277ce91c5233473321129d32b5a8085fa8133",
"6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081",
"66484000ed3f75c95fc7b03f39c20ca1e1011e5999278247d3b2f5e3c3273808",
"7d5007603a7f5dd729d51d93cf695d6465789443bb967c0d1fe270e388c96eaa",
]
);
// Starting at a complete key the first key is skipped.
let res: Vec<_> = client
.storage_keys(
block_hash,
Some(&prefix),
Some(&StorageKey(array_bytes::hex2bytes_unchecked("3a636f6465"))),
@@ -1811,7 +1877,7 @@ fn storage_keys_iter_works() {
);
let res: Vec<_> = client
.storage_keys_iter(
.storage_keys(
block_hash,
Some(&prefix),
Some(&StorageKey(array_bytes::hex2bytes_unchecked(
@@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
[dependencies]
codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false }
hash-db = { version = "0.15.2", default-features = false }
log = { version = "0.4.17", optional = true }
log = { version = "0.4.17", default-features = false }
parking_lot = { version = "0.12.1", optional = true }
rand = { version = "0.8.5", optional = true }
smallvec = "1.8.0"
@@ -33,7 +33,7 @@ array-bytes = "4.1"
pretty_assertions = "1.2.1"
rand = "0.8.5"
sp-runtime = { version = "7.0.0", path = "../runtime" }
trie-db = "0.24.0"
trie-db = "0.25.1"
assert_matches = "1.5"
[features]
@@ -41,7 +41,7 @@ default = ["std"]
std = [
"codec/std",
"hash-db/std",
"log",
"log/std",
"parking_lot",
"rand",
"sp-core/std",
+213 -23
View File
@@ -24,12 +24,135 @@ use crate::{
StorageKey, StorageValue, UsageInfo,
};
use codec::Encode;
use core::marker::PhantomData;
use hash_db::Hasher;
use sp_core::storage::{ChildInfo, StateVersion, TrackedStorageKey};
#[cfg(feature = "std")]
use sp_core::traits::RuntimeCode;
use sp_std::vec::Vec;
/// A struct containing arguments for iterating over the storage.
///
/// This struct is `#[non_exhaustive]`, so construct it with struct update
/// syntax, e.g. `IterArgs { prefix: Some(b"foo"), ..IterArgs::default() }`.
#[derive(Default)]
#[non_exhaustive]
pub struct IterArgs<'a> {
	/// The prefix of the keys over which to iterate.
	pub prefix: Option<&'a [u8]>,

	/// The prefix from which to start the iteration from.
	///
	/// This is inclusive and the iteration will include the key which is specified here.
	pub start_at: Option<&'a [u8]>,

	/// The info of the child trie over which to iterate over.
	///
	/// When `None` the main (top level) trie is iterated.
	pub child_info: Option<ChildInfo>,

	/// Whether to stop iteration when a missing trie node is reached.
	///
	/// When a missing trie node is reached the iterator will:
	///   - return an error if this is set to `false` (default)
	///   - return `None` if this is set to `true`
	pub stop_on_incomplete_database: bool,
}
/// A trait for a raw storage iterator.
///
/// Note that implementors do not borrow the backend: the backend is passed
/// into every call instead. This is what allows `Backend::raw_iter` to hand
/// out a lifetimeless iterator which [`KeysIter`]/[`PairsIter`] then wrap.
pub trait StorageIterator<H>
where
	H: Hasher,
{
	/// The state backend over which the iterator is iterating.
	type Backend;

	/// The error type.
	type Error;

	/// Fetches the next key from the storage.
	fn next_key(
		&mut self,
		backend: &Self::Backend,
	) -> Option<core::result::Result<StorageKey, Self::Error>>;

	/// Fetches the next key and value from the storage.
	fn next_pair(
		&mut self,
		backend: &Self::Backend,
	) -> Option<core::result::Result<(StorageKey, StorageValue), Self::Error>>;

	/// Returns whether the end of iteration was reached without an error.
	fn was_complete(&self) -> bool;
}
/// An iterator over storage keys and values.
///
/// A thin wrapper which drives a [`StorageIterator`] and exposes the standard
/// [`Iterator`] interface on top of it.
pub struct PairsIter<'a, H, I>
where
	H: Hasher,
	I: StorageIterator<H>,
{
	// The backend queried on each `next()`; `None` only for a
	// default-constructed iterator, which yields nothing.
	backend: Option<&'a I::Backend>,
	// The underlying raw iterator holding the iteration state.
	raw_iter: I,
	_phantom: PhantomData<H>,
}
impl<'a, H, I> Iterator for PairsIter<'a, H, I>
where
	H: Hasher,
	I: StorageIterator<H>,
{
	type Item = Result<(Vec<u8>, Vec<u8>), <I as StorageIterator<H>>::Error>;

	/// Pulls the next key/value pair out of the underlying raw iterator.
	fn next(&mut self) -> Option<Self::Item> {
		// Without a backend (i.e. a default-constructed iterator) there is
		// nothing to iterate over.
		let backend = self.backend?;
		self.raw_iter.next_pair(backend)
	}
}
impl<'a, H, I> Default for PairsIter<'a, H, I>
where
H: Hasher,
I: StorageIterator<H> + Default,
{
fn default() -> Self {
Self {
backend: Default::default(),
raw_iter: Default::default(),
_phantom: Default::default(),
}
}
}
/// An iterator over storage keys.
///
/// A thin wrapper which drives a [`StorageIterator`] and exposes the standard
/// [`Iterator`] interface on top of it, yielding only the keys.
pub struct KeysIter<'a, H, I>
where
	H: Hasher,
	I: StorageIterator<H>,
{
	// The backend queried on each `next()`; `None` only for a
	// default-constructed iterator, which yields nothing.
	backend: Option<&'a I::Backend>,
	// The underlying raw iterator holding the iteration state.
	raw_iter: I,
	_phantom: PhantomData<H>,
}
impl<'a, H, I> Iterator for KeysIter<'a, H, I>
where
	H: Hasher,
	I: StorageIterator<H>,
{
	type Item = Result<Vec<u8>, <I as StorageIterator<H>>::Error>;

	/// Pulls the next key out of the underlying raw iterator.
	fn next(&mut self) -> Option<Self::Item> {
		// Without a backend (i.e. a default-constructed iterator) there is
		// nothing to iterate over.
		let backend = self.backend?;
		self.raw_iter.next_key(backend)
	}
}
impl<'a, H, I> Default for KeysIter<'a, H, I>
where
H: Hasher,
I: StorageIterator<H> + Default,
{
fn default() -> Self {
Self {
backend: Default::default(),
raw_iter: Default::default(),
_phantom: Default::default(),
}
}
}
/// A state backend is used to read state data and can have changes committed
/// to it.
///
@@ -44,6 +167,9 @@ pub trait Backend<H: Hasher>: sp_std::fmt::Debug {
/// Type of trie backend storage.
type TrieBackendStorage: TrieBackendStorage<H, Overlay = Self::Transaction>;
/// Type of the raw storage iterator.
type RawIter: StorageIterator<H, Backend = Self, Error = Self::Error>;
/// Get keyed storage or None if there is nothing associated.
fn storage(&self, key: &[u8]) -> Result<Option<StorageValue>, Self::Error>;
@@ -95,43 +221,103 @@ pub trait Backend<H: Hasher>: sp_std::fmt::Debug {
/// Otherwise an error is produced.
///
/// Returns `true` if trie end is reached.
// TODO: Remove this.
fn apply_to_key_values_while<F: FnMut(Vec<u8>, Vec<u8>) -> bool>(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
mut f: F,
allow_missing: bool,
) -> Result<bool, Self::Error>;
) -> Result<bool, Self::Error> {
let args = IterArgs {
child_info: child_info.cloned(),
prefix,
start_at,
stop_on_incomplete_database: allow_missing,
..IterArgs::default()
};
let mut iter = self.pairs(args)?;
while let Some(key_value) = iter.next() {
let (key, value) = key_value?;
if !f(key, value) {
return Ok(false)
}
}
Ok(iter.raw_iter.was_complete())
}
/// Retrieve all entries keys of storage and call `f` for each of those keys.
/// Aborts as soon as `f` returns false.
// TODO: Remove this.
fn apply_to_keys_while<F: FnMut(&[u8]) -> bool>(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
);
mut f: F,
) -> Result<(), Self::Error> {
let args =
IterArgs { child_info: child_info.cloned(), prefix, start_at, ..IterArgs::default() };
for key in self.keys(args)? {
if !f(&key?) {
return Ok(())
}
}
Ok(())
}
/// Retrieve all entries keys which start with the given prefix and
/// call `f` for each of those keys.
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], mut f: F) {
self.for_key_values_with_prefix(prefix, |k, _v| f(k))
// TODO: Remove this.
fn for_keys_with_prefix<F: FnMut(&[u8])>(
&self,
prefix: &[u8],
mut f: F,
) -> Result<(), Self::Error> {
let args = IterArgs { prefix: Some(prefix), ..IterArgs::default() };
self.keys(args)?.try_for_each(|key| {
f(&key?);
Ok(())
})
}
/// Retrieve all entries keys and values of which start with the given prefix and
/// call `f` for each of those keys.
fn for_key_values_with_prefix<F: FnMut(&[u8], &[u8])>(&self, prefix: &[u8], f: F);
// TODO: Remove this.
fn for_key_values_with_prefix<F: FnMut(&[u8], &[u8])>(
&self,
prefix: &[u8],
mut f: F,
) -> Result<(), Self::Error> {
let args = IterArgs { prefix: Some(prefix), ..IterArgs::default() };
self.pairs(args)?.try_for_each(|key_value| {
let (key, value) = key_value?;
f(&key, &value);
Ok(())
})
}
/// Retrieve all child entries keys which start with the given prefix and
/// call `f` for each of those keys.
// TODO: Remove this.
fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
&self,
child_info: &ChildInfo,
prefix: &[u8],
f: F,
);
mut f: F,
) -> Result<(), Self::Error> {
let args = IterArgs {
child_info: Some(child_info.clone()),
prefix: Some(prefix),
..IterArgs::default()
};
self.keys(args)?.try_for_each(|key| {
f(&key?);
Ok(())
})
}
/// Calculate the storage root, with given delta over what is already stored in
/// the backend, and produce a "transaction" that can be used to commit.
@@ -156,21 +342,25 @@ pub trait Backend<H: Hasher>: sp_std::fmt::Debug {
where
H::Out: Ord;
/// Get all key/value pairs into a Vec.
fn pairs(&self) -> Vec<(StorageKey, StorageValue)>;
/// Returns a lifetimeless raw storage iterator.
fn raw_iter(&self, args: IterArgs) -> Result<Self::RawIter, Self::Error>;
/// Get all keys with given prefix
fn keys(&self, prefix: &[u8]) -> Vec<StorageKey> {
let mut all = Vec::new();
self.for_keys_with_prefix(prefix, |k| all.push(k.to_vec()));
all
/// Get an iterator over key/value pairs.
fn pairs<'a>(&'a self, args: IterArgs) -> Result<PairsIter<'a, H, Self::RawIter>, Self::Error> {
Ok(PairsIter {
backend: Some(self),
raw_iter: self.raw_iter(args)?,
_phantom: Default::default(),
})
}
/// Get all keys of child storage with given prefix
fn child_keys(&self, child_info: &ChildInfo, prefix: &[u8]) -> Vec<StorageKey> {
let mut all = Vec::new();
self.for_child_keys_with_prefix(child_info, prefix, |k| all.push(k.to_vec()));
all
/// Get an iterator over keys.
fn keys<'a>(&'a self, args: IterArgs) -> Result<KeysIter<'a, H, Self::RawIter>, Self::Error> {
Ok(KeysIter {
backend: Some(self),
raw_iter: self.raw_iter(args)?,
_phantom: Default::default(),
})
}
/// Calculate the storage root, with given delta over what is already stored
@@ -309,7 +499,7 @@ where
#[cfg(feature = "std")]
pub struct BackendRuntimeCode<'a, B, H> {
backend: &'a B,
_marker: std::marker::PhantomData<H>,
_marker: PhantomData<H>,
}
#[cfg(feature = "std")]
@@ -332,7 +522,7 @@ where
{
/// Create a new instance.
pub fn new(backend: &'a B) -> Self {
Self { backend, _marker: std::marker::PhantomData }
Self { backend, _marker: PhantomData }
}
/// Return the [`RuntimeCode`] build from the wrapped `backend`.
+31 -24
View File
@@ -159,9 +159,10 @@ where
use std::collections::HashMap;
self.backend
.pairs()
.iter()
.map(|&(ref k, ref v)| (k.to_vec(), Some(v.to_vec())))
.pairs(Default::default())
.expect("never fails in tests; qed.")
.map(|key_value| key_value.expect("never fails in tests; qed."))
.map(|(k, v)| (k, Some(v)))
.chain(self.overlay.changes().map(|(k, v)| (k.clone(), v.value().cloned())))
.collect::<HashMap<_, _>>()
.into_iter()
@@ -757,28 +758,34 @@ where
let mut delete_count: u32 = 0;
let mut loop_count: u32 = 0;
let mut maybe_next_key = None;
self.backend
.apply_to_keys_while(maybe_child, maybe_prefix, maybe_cursor, |key| {
if maybe_limit.map_or(false, |limit| loop_count == limit) {
maybe_next_key = Some(key.to_vec());
return false
}
let overlay = match maybe_child {
Some(child_info) => self.overlay.child_storage(child_info, key),
None => self.overlay.storage(key),
};
if !matches!(overlay, Some(None)) {
// not pending deletion from the backend - delete it.
if let Some(child_info) = maybe_child {
self.overlay.set_child_storage(child_info, key.to_vec(), None);
} else {
self.overlay.set_storage(key.to_vec(), None);
let result =
self.backend
.apply_to_keys_while(maybe_child, maybe_prefix, maybe_cursor, |key| {
if maybe_limit.map_or(false, |limit| loop_count == limit) {
maybe_next_key = Some(key.to_vec());
return false
}
delete_count = delete_count.saturating_add(1);
}
loop_count = loop_count.saturating_add(1);
true
});
let overlay = match maybe_child {
Some(child_info) => self.overlay.child_storage(child_info, key),
None => self.overlay.storage(key),
};
if !matches!(overlay, Some(None)) {
// not pending deletion from the backend - delete it.
if let Some(child_info) = maybe_child {
self.overlay.set_child_storage(child_info, key.to_vec(), None);
} else {
self.overlay.set_storage(key.to_vec(), None);
}
delete_count = delete_count.saturating_add(1);
}
loop_count = loop_count.saturating_add(1);
true
});
if let Err(error) = result {
log::debug!(target: "trie", "Error while iterating the storage: {}", error);
}
(maybe_next_key, delete_count, loop_count)
}
}
@@ -123,7 +123,7 @@ impl sp_std::fmt::Display for DefaultError {
}
pub use crate::{
backend::Backend,
backend::{Backend, IterArgs, KeysIter, PairsIter, StorageIterator},
error::{Error, ExecutionError},
ext::Ext,
overlayed_changes::{
@@ -241,7 +241,12 @@ where
H::Out: Ord + codec::Codec,
{
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "overlay: {:?}\nbackend: {:?}", self.overlay, self.backend.pairs())
let pairs: Vec<_> = self
.backend
.pairs(Default::default())
.expect("creating an iterator over all of the pairs doesn't fail in tests")
.collect();
write!(f, "overlay: {:?}\nbackend: {:?}", self.overlay, pairs)
}
}
@@ -20,6 +20,7 @@
#[cfg(feature = "std")]
use crate::backend::AsTrieBackend;
use crate::{
backend::IterArgs,
trie_backend_essence::{TrieBackendEssence, TrieBackendStorage},
Backend, StorageKey, StorageValue,
};
@@ -28,7 +29,6 @@ use codec::Codec;
use hash_db::HashDB;
use hash_db::Hasher;
use sp_core::storage::{ChildInfo, StateVersion};
use sp_std::vec::Vec;
#[cfg(feature = "std")]
use sp_trie::{cache::LocalTrieCache, recorder::Recorder};
#[cfg(feature = "std")]
@@ -51,6 +51,7 @@ pub trait AsLocalTrieCache<H: Hasher>: sealed::Sealed {
impl<H: Hasher> AsLocalTrieCache<H> for LocalTrieCache<H> {
#[cfg(feature = "std")]
#[inline]
fn as_local_trie_cache(&self) -> &LocalTrieCache<H> {
self
}
@@ -58,6 +59,7 @@ impl<H: Hasher> AsLocalTrieCache<H> for LocalTrieCache<H> {
#[cfg(feature = "std")]
impl<H: Hasher> AsLocalTrieCache<H> for &LocalTrieCache<H> {
#[inline]
fn as_local_trie_cache(&self) -> &LocalTrieCache<H> {
self
}
@@ -236,6 +238,7 @@ where
type Error = crate::DefaultError;
type Transaction = S::Overlay;
type TrieBackendStorage = S;
type RawIter = crate::trie_backend_essence::RawIter<S, H, C>;
fn storage_hash(&self, key: &[u8]) -> Result<Option<H::Out>, Self::Error> {
self.essence.storage_hash(key)
@@ -273,51 +276,8 @@ where
self.essence.next_child_storage_key(child_info, key)
}
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
self.essence.for_keys_with_prefix(prefix, f)
}
fn for_key_values_with_prefix<F: FnMut(&[u8], &[u8])>(&self, prefix: &[u8], f: F) {
self.essence.for_key_values_with_prefix(prefix, f)
}
fn apply_to_key_values_while<F: FnMut(Vec<u8>, Vec<u8>) -> bool>(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
allow_missing: bool,
) -> Result<bool, Self::Error> {
self.essence
.apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing)
}
fn apply_to_keys_while<F: FnMut(&[u8]) -> bool>(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
) {
self.essence.apply_to_keys_while(child_info, prefix, start_at, f)
}
fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
&self,
child_info: &ChildInfo,
prefix: &[u8],
f: F,
) {
self.essence.for_child_keys_with_prefix(child_info, prefix, f)
}
fn pairs(&self) -> Vec<(StorageKey, StorageValue)> {
self.essence.pairs()
}
fn keys(&self, prefix: &[u8]) -> Vec<StorageKey> {
self.essence.keys(prefix)
fn raw_iter(&self, args: IterArgs) -> Result<Self::RawIter, Self::Error> {
self.essence.raw_iter(args)
}
fn storage_root<'a>(
@@ -579,7 +539,11 @@ pub mod tests {
cache: Option<Cache>,
recorder: Option<Recorder>,
) {
assert!(!test_trie(state_version, cache, recorder).pairs().is_empty());
assert!(!test_trie(state_version, cache, recorder)
.pairs(Default::default())
.unwrap()
.next()
.is_none());
}
#[test]
@@ -589,8 +553,163 @@ pub mod tests {
Default::default(),
)
.build()
.pairs()
.is_empty());
.pairs(Default::default())
.unwrap()
.next()
.is_none());
}
parameterized_test!(storage_iteration_works, storage_iteration_works_inner);
/// Exercises `Backend::keys` with various `IterArgs` combinations (no args,
/// `start_at` with full/partial/empty keys, `prefix` + `start_at`) against the
/// fixture trie built by `test_trie`, plus the deprecated wrapper methods
/// (`apply_to_key_values_while` / `apply_to_keys_while`) for parity.
fn storage_iteration_works_inner(
	state_version: StateVersion,
	cache: Option<Cache>,
	recorder: Option<Recorder>,
) {
	let trie = test_trie(state_version, cache, recorder);

	// Fetch everything.
	assert_eq!(
		trie.keys(Default::default())
			.unwrap()
			.map(|result| result.unwrap())
			.take(5)
			.collect::<Vec<_>>(),
		vec![
			b":child_storage:default:sub1".to_vec(),
			b":code".to_vec(),
			b"key".to_vec(),
			b"value1".to_vec(),
			b"value2".to_vec(),
		]
	);

	// Fetch starting at a given key (full key).
	// `start_at` is inclusive, so `b"key"` itself is returned first.
	assert_eq!(
		trie.keys(IterArgs { start_at: Some(b"key"), ..IterArgs::default() })
			.unwrap()
			.map(|result| result.unwrap())
			.take(3)
			.collect::<Vec<_>>(),
		vec![b"key".to_vec(), b"value1".to_vec(), b"value2".to_vec(),]
	);

	// Fetch starting at a given key (partial key).
	// An incomplete key doesn't match any entry, so nothing is skipped.
	assert_eq!(
		trie.keys(IterArgs { start_at: Some(b"ke"), ..IterArgs::default() })
			.unwrap()
			.map(|result| result.unwrap())
			.take(3)
			.collect::<Vec<_>>(),
		vec![b"key".to_vec(), b"value1".to_vec(), b"value2".to_vec(),]
	);

	// Fetch starting at a given key (empty key).
	assert_eq!(
		trie.keys(IterArgs { start_at: Some(b""), ..IterArgs::default() })
			.unwrap()
			.map(|result| result.unwrap())
			.take(5)
			.collect::<Vec<_>>(),
		vec![
			b":child_storage:default:sub1".to_vec(),
			b":code".to_vec(),
			b"key".to_vec(),
			b"value1".to_vec(),
			b"value2".to_vec(),
		]
	);

	// Fetch starting at a given key and with prefix which doesn't match that key.
	assert!(trie
		.keys(IterArgs {
			prefix: Some(b"value"),
			start_at: Some(b"key"),
			..IterArgs::default()
		})
		.unwrap()
		.map(|result| result.unwrap())
		.next()
		.is_none());

	// Fetch starting at a given key and with prefix which does match that key.
	assert_eq!(
		trie.keys(IterArgs {
			prefix: Some(b"value"),
			start_at: Some(b"value"),
			..IterArgs::default()
		})
		.unwrap()
		.map(|result| result.unwrap())
		.collect::<Vec<_>>(),
		vec![b"value1".to_vec(), b"value2".to_vec(),]
	);

	// Also test out the wrapper methods.
	// TODO: Remove this once these methods are gone.
	let mut list = Vec::new();
	assert!(trie
		.apply_to_key_values_while(
			None,
			None,
			Some(b"key"),
			|key, _| {
				list.push(key);
				true
			},
			false
		)
		.unwrap());
	assert_eq!(list[0..3], vec![b"key".to_vec(), b"value1".to_vec(), b"value2".to_vec(),]);

	let mut list = Vec::new();
	trie.apply_to_keys_while(None, None, Some(b"key"), |key| {
		list.push(key.to_vec());
		true
	})
	.unwrap();
	assert_eq!(list[0..3], vec![b"key".to_vec(), b"value1".to_vec(), b"value2".to_vec(),]);

	let mut list = Vec::new();
	trie.apply_to_keys_while(None, None, Some(b"k"), |key| {
		list.push(key.to_vec());
		true
	})
	.unwrap();
	assert_eq!(list[0..3], vec![b"key".to_vec(), b"value1".to_vec(), b"value2".to_vec(),]);

	let mut list = Vec::new();
	trie.apply_to_keys_while(None, None, Some(b""), |key| {
		list.push(key.to_vec());
		true
	})
	.unwrap();
	assert_eq!(
		list[0..5],
		vec![
			b":child_storage:default:sub1".to_vec(),
			b":code".to_vec(),
			b"key".to_vec(),
			b"value1".to_vec(),
			b"value2".to_vec(),
		]
	);

	let mut list = Vec::new();
	trie.apply_to_keys_while(None, Some(b"value"), Some(b"key"), |key| {
		list.push(key.to_vec());
		true
	})
	.unwrap();
	assert!(list.is_empty());

	let mut list = Vec::new();
	trie.apply_to_keys_while(None, Some(b"value"), Some(b"value"), |key| {
		list.push(key.to_vec());
		true
	})
	.unwrap();
	assert_eq!(list, vec![b"value1".to_vec(), b"value2".to_vec(),]);
}
parameterized_test!(storage_root_is_non_default, storage_root_is_non_default_inner);
@@ -638,7 +757,8 @@ pub mod tests {
trie.for_keys_with_prefix(b"value", |key| {
let for_first_time = seen.insert(key.to_vec());
assert!(for_first_time, "Seen key '{:?}' more than once", key);
});
})
.unwrap();
let mut expected = HashSet::new();
expected.insert(b"value1".to_vec());
@@ -664,7 +784,8 @@ pub mod tests {
.collect::<Vec<_>>();
let trie = test_trie(state_version, cache, recorder);
let keys = trie.keys(&[]);
let keys: Vec<_> =
trie.keys(Default::default()).unwrap().map(|result| result.unwrap()).collect();
assert_eq!(expected, keys);
}
@@ -724,7 +845,18 @@ pub mod tests {
.with_recorder(Recorder::default())
.build();
assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap());
assert_eq!(trie_backend.pairs(), proving_backend.pairs());
assert_eq!(
trie_backend
.pairs(Default::default())
.unwrap()
.map(|result| result.unwrap())
.collect::<Vec<_>>(),
proving_backend
.pairs(Default::default())
.unwrap()
.map(|result| result.unwrap())
.collect::<Vec<_>>()
);
let (trie_root, mut trie_mdb) =
trie_backend.storage_root(std::iter::empty(), state_version);
@@ -19,24 +19,23 @@
//! from storage.
use crate::{
backend::Consolidate, debug, trie_backend::AsLocalTrieCache, warn, StorageKey, StorageValue,
backend::{Consolidate, IterArgs, StorageIterator},
trie_backend::AsLocalTrieCache,
warn, StorageKey, StorageValue,
};
use codec::Codec;
use hash_db::{self, AsHashDB, HashDB, HashDBRef, Hasher, Prefix};
#[cfg(feature = "std")]
use parking_lot::RwLock;
use sp_core::storage::{ChildInfo, ChildType, StateVersion};
#[cfg(not(feature = "std"))]
use sp_std::marker::PhantomData;
use sp_std::{boxed::Box, vec::Vec};
use sp_std::{boxed::Box, marker::PhantomData, vec::Vec};
#[cfg(feature = "std")]
use sp_trie::recorder::Recorder;
use sp_trie::{
child_delta_trie_root, delta_trie_root, empty_child_trie_root, read_child_trie_hash,
read_child_trie_value, read_trie_value,
trie_types::{TrieDBBuilder, TrieError},
DBValue, KeySpacedDB, NodeCodec, Trie, TrieCache, TrieDBIterator, TrieDBKeyIterator,
TrieRecorder,
DBValue, KeySpacedDB, NodeCodec, Trie, TrieCache, TrieDBRawIterator, TrieRecorder,
};
#[cfg(feature = "std")]
use std::{collections::HashMap, sync::Arc};
@@ -76,6 +75,109 @@ impl<H> Cache<H> {
}
}
/// The state of a [`RawIter`].
enum IterState {
	// Iteration is still in progress; more items may be produced.
	Pending,
	// The end of the trie was reached without an error.
	FinishedComplete,
	// Iteration was aborted because the underlying `trie-db` iterator
	// returned an error.
	FinishedIncomplete,
}
/// A raw iterator over the storage.
pub struct RawIter<S, H, C>
where
	H: Hasher,
{
	// When `true`, hitting a missing trie node silently ends the iteration
	// instead of yielding an error (see `IterArgs::stop_on_incomplete_database`).
	stop_on_incomplete_database: bool,
	// The storage root of the trie being iterated.
	root: H::Out,
	// The child trie being iterated, if any; `None` for the top-level trie.
	child_info: Option<ChildInfo>,
	// The underlying `trie-db` iterator holding the iteration cursor.
	trie_iter: TrieDBRawIterator<Layout<H>>,
	// Whether iteration is still in progress, and if not, how it finished.
	state: IterState,
	_phantom: PhantomData<(S, C)>,
}
impl<S, H, C> RawIter<S, H, C>
where
	H: Hasher,
	S: TrieBackendStorage<H>,
	H::Out: Codec + Ord,
	C: AsLocalTrieCache<H> + Send + Sync,
{
	/// Drives the underlying `trie-db` iterator one step through `callback`
	/// and translates the outcome into this iterator's state machine.
	///
	/// Returns `None` once the iteration has finished, either because the end
	/// of the trie was reached or because an error was encountered.
	#[inline]
	fn prepare<R>(
		&mut self,
		backend: &TrieBackendEssence<S, H, C>,
		callback: impl FnOnce(
			&sp_trie::TrieDB<Layout<H>>,
			&mut TrieDBRawIterator<Layout<H>>,
		) -> Option<core::result::Result<R, Box<TrieError<<H as Hasher>::Out>>>>,
	) -> Option<Result<R>> {
		// A finished iterator (successfully or not) is fused: it stays finished.
		if !matches!(self.state, IterState::Pending) {
			return None
		}

		let result = backend.with_trie_db(self.root, self.child_info.as_ref(), |db| {
			callback(&db, &mut self.trie_iter)
		});
		match result {
			Some(Ok(key_value)) => Some(Ok(key_value)),
			None => {
				// The underlying iterator is exhausted; the whole trie was walked.
				self.state = IterState::FinishedComplete;
				None
			},
			Some(Err(error)) => {
				self.state = IterState::FinishedIncomplete;
				if matches!(*error, TrieError::IncompleteDatabase(_)) &&
					self.stop_on_incomplete_database
				{
					// The caller opted in to treating a missing trie node as a
					// normal (silent) end of iteration.
					None
				} else {
					Some(Err(format!("TrieDB iteration error: {}", error)))
				}
			},
		}
	}
}
impl<S, H, C> Default for RawIter<S, H, C>
where
H: Hasher,
{
fn default() -> Self {
Self {
stop_on_incomplete_database: false,
child_info: None,
root: Default::default(),
trie_iter: TrieDBRawIterator::empty(),
state: IterState::FinishedComplete,
_phantom: Default::default(),
}
}
}
impl<S, H, C> StorageIterator<H> for RawIter<S, H, C>
where
H: Hasher,
S: TrieBackendStorage<H>,
H::Out: Codec + Ord,
C: AsLocalTrieCache<H> + Send + Sync,
{
type Backend = crate::TrieBackend<S, H, C>;
type Error = crate::DefaultError;
#[inline]
fn next_key(&mut self, backend: &Self::Backend) -> Option<Result<StorageKey>> {
self.prepare(&backend.essence, |trie, trie_iter| trie_iter.next_key(&trie))
}
#[inline]
fn next_pair(&mut self, backend: &Self::Backend) -> Option<Result<(StorageKey, StorageValue)>> {
self.prepare(&backend.essence, |trie, trie_iter| trie_iter.next_item(&trie))
}
fn was_complete(&self) -> bool {
matches!(self.state, IterState::FinishedComplete)
}
}
/// Patricia trie-based pairs storage essence.
pub struct TrieBackendEssence<S: TrieBackendStorage<H>, H: Hasher, C> {
storage: S,
@@ -168,6 +270,7 @@ impl<S: TrieBackendStorage<H>, H: Hasher, C: AsLocalTrieCache<H>> TrieBackendEss
///
/// If the given `storage_root` is `None`, `self.root` will be used.
#[cfg(feature = "std")]
#[inline]
fn with_recorder_and_cache<R>(
&self,
storage_root: Option<H::Out>,
@@ -193,6 +296,7 @@ impl<S: TrieBackendStorage<H>, H: Hasher, C: AsLocalTrieCache<H>> TrieBackendEss
}
#[cfg(not(feature = "std"))]
#[inline]
fn with_recorder_and_cache<R>(
&self,
_: Option<H::Out>,
@@ -262,6 +366,31 @@ impl<S: TrieBackendStorage<H>, H: Hasher, C: AsLocalTrieCache<H> + Send + Sync>
where
H::Out: Codec + Ord,
{
/// Calls the given closure with a [`TrieDb`] constructed for the given
/// storage root and (optionally) child trie.
#[inline]
fn with_trie_db<R>(
&self,
root: H::Out,
child_info: Option<&ChildInfo>,
callback: impl FnOnce(&sp_trie::TrieDB<Layout<H>>) -> R,
) -> R {
let backend = self as &dyn HashDBRef<H, Vec<u8>>;
let db = child_info
.as_ref()
.map(|child_info| KeySpacedDB::new(backend, child_info.keyspace()));
let db = db.as_ref().map(|db| db as &dyn HashDBRef<H, Vec<u8>>).unwrap_or(backend);
self.with_recorder_and_cache(Some(root), |recorder, cache| {
let trie = TrieDBBuilder::<H>::new(db, &root)
.with_optional_recorder(recorder)
.with_optional_cache(cache)
.build();
callback(&trie)
})
}
/// Return the next key in the trie i.e. the minimum key that is strictly superior to `key` in
/// lexicographic order.
pub fn next_storage_key(&self, key: &[u8]) -> Result<Option<StorageKey>> {
@@ -316,21 +445,7 @@ where
child_info: Option<&ChildInfo>,
key: &[u8],
) -> Result<Option<StorageKey>> {
let dyn_eph: &dyn HashDBRef<_, _>;
let keyspace_eph;
if let Some(child_info) = child_info.as_ref() {
keyspace_eph = KeySpacedDB::new(self, child_info.keyspace());
dyn_eph = &keyspace_eph;
} else {
dyn_eph = self;
}
self.with_recorder_and_cache(Some(*root), |recorder, cache| {
let trie = TrieDBBuilder::<H>::new(dyn_eph, root)
.with_optional_recorder(recorder)
.with_optional_cache(cache)
.build();
self.with_trie_db(*root, child_info, |trie| {
let mut iter = trie.key_iter().map_err(|e| format!("TrieDB iteration error: {}", e))?;
// The key just after the one given in input, basically `key++0`.
@@ -429,246 +544,42 @@ where
})
}
/// Retrieve all entries keys of storage and call `f` for each of those keys.
/// Aborts as soon as `f` returns false.
///
/// Returns `true` when all keys were iterated.
pub fn apply_to_key_values_while(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: impl FnMut(Vec<u8>, Vec<u8>) -> bool,
allow_missing_nodes: bool,
) -> Result<bool> {
let root = if let Some(child_info) = child_info.as_ref() {
match self.child_root(child_info)? {
Some(child_root) => child_root,
None => return Ok(true),
}
/// Create a raw iterator over the storage.
pub fn raw_iter(&self, args: IterArgs) -> Result<RawIter<S, H, C>> {
let root = if let Some(child_info) = args.child_info.as_ref() {
let root = match self.child_root(&child_info)? {
Some(root) => root,
None => return Ok(Default::default()),
};
root
} else {
self.root
};
self.trie_iter_inner(&root, prefix, f, child_info, start_at, allow_missing_nodes)
}
/// Retrieve all entries keys of a storage and call `f` for each of those keys.
/// Aborts as soon as `f` returns false.
pub fn apply_to_keys_while<F: FnMut(&[u8]) -> bool>(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
) {
let root = if let Some(child_info) = child_info.as_ref() {
match self.child_root(child_info) {
Ok(Some(v)) => v,
// If the child trie doesn't exist, there is no need to continue.
Ok(None) => return,
Err(e) => {
debug!(target: "trie", "Error while iterating child storage: {}", e);
return
},
}
} else {
self.root
};
self.trie_iter_key_inner(&root, prefix, f, child_info, start_at)
}
/// Execute given closure for all keys starting with prefix.
pub fn for_child_keys_with_prefix(
&self,
child_info: &ChildInfo,
prefix: &[u8],
mut f: impl FnMut(&[u8]),
) {
let root = match self.child_root(child_info) {
Ok(Some(v)) => v,
// If the child trie doesn't exist, there is no need to continue.
Ok(None) => return,
Err(e) => {
debug!(target: "trie", "Error while iterating child storage: {}", e);
return
},
};
self.trie_iter_key_inner(
&root,
Some(prefix),
|k| {
f(k);
true
},
Some(child_info),
None,
)
}
/// Execute given closure for all keys starting with prefix.
pub fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], mut f: F) {
self.trie_iter_key_inner(
&self.root,
Some(prefix),
|k| {
f(k);
true
},
None,
None,
)
}
fn trie_iter_key_inner<F: FnMut(&[u8]) -> bool>(
&self,
root: &H::Out,
maybe_prefix: Option<&[u8]>,
mut f: F,
child_info: Option<&ChildInfo>,
maybe_start_at: Option<&[u8]>,
) {
let mut iter = move |db| -> sp_std::result::Result<(), Box<TrieError<H::Out>>> {
self.with_recorder_and_cache(Some(*root), |recorder, cache| {
let trie = TrieDBBuilder::<H>::new(db, root)
.with_optional_recorder(recorder)
.with_optional_cache(cache)
.build();
let prefix = maybe_prefix.unwrap_or(&[]);
let iter = match maybe_start_at {
Some(start_at) =>
TrieDBKeyIterator::new_prefixed_then_seek(&trie, prefix, start_at),
None => TrieDBKeyIterator::new_prefixed(&trie, prefix),
}?;
for x in iter {
let key = x?;
debug_assert!(maybe_prefix
.as_ref()
.map(|prefix| key.starts_with(prefix))
.unwrap_or(true));
if !f(&key) {
break
}
}
Ok(())
})
};
let result = if let Some(child_info) = child_info {
let db = KeySpacedDB::new(self, child_info.keyspace());
iter(&db)
} else {
iter(self)
};
if let Err(e) = result {
debug!(target: "trie", "Error while iterating by prefix: {}", e);
if self.root == Default::default() {
// A special-case for an empty storage root.
return Ok(Default::default())
}
}
fn trie_iter_inner<F: FnMut(Vec<u8>, Vec<u8>) -> bool>(
&self,
root: &H::Out,
prefix: Option<&[u8]>,
mut f: F,
child_info: Option<&ChildInfo>,
start_at: Option<&[u8]>,
allow_missing_nodes: bool,
) -> Result<bool> {
let mut iter = move |db| -> sp_std::result::Result<bool, Box<TrieError<H::Out>>> {
self.with_recorder_and_cache(Some(*root), |recorder, cache| {
let trie = TrieDBBuilder::<H>::new(db, root)
.with_optional_recorder(recorder)
.with_optional_cache(cache)
.build();
let prefix = prefix.unwrap_or(&[]);
let iterator = if let Some(start_at) = start_at {
TrieDBIterator::new_prefixed_then_seek(&trie, prefix, start_at)?
let trie_iter = self
.with_trie_db(root, args.child_info.as_ref(), |db| {
let prefix = args.prefix.as_deref().unwrap_or(&[]);
if let Some(start_at) = args.start_at {
TrieDBRawIterator::new_prefixed_then_seek(db, prefix, &start_at)
} else {
TrieDBIterator::new_prefixed(&trie, prefix)?
};
for x in iterator {
let (key, value) = x?;
debug_assert!(key.starts_with(prefix));
if !f(key, value) {
return Ok(false)
}
TrieDBRawIterator::new_prefixed(db, prefix)
}
Ok(true)
})
};
.map_err(|e| format!("TrieDB iteration error: {}", e))?;
let result = if let Some(child_info) = child_info {
let db = KeySpacedDB::new(self, child_info.keyspace());
iter(&db)
} else {
iter(self)
};
match result {
Ok(completed) => Ok(completed),
Err(e) if matches!(*e, TrieError::IncompleteDatabase(_)) && allow_missing_nodes =>
Ok(false),
Err(e) => Err(format!("TrieDB iteration error: {}", e)),
}
}
/// Execute given closure for all key and values starting with prefix.
pub fn for_key_values_with_prefix<F: FnMut(&[u8], &[u8])>(&self, prefix: &[u8], mut f: F) {
let _ = self.trie_iter_inner(
&self.root,
Some(prefix),
|k, v| {
f(&k, &v);
true
},
None,
None,
false,
);
}
/// Returns all `(key, value)` pairs in the trie.
pub fn pairs(&self) -> Vec<(StorageKey, StorageValue)> {
let collect_all = || -> sp_std::result::Result<_, Box<TrieError<H::Out>>> {
self.with_recorder_and_cache(None, |recorder, cache| {
let trie = TrieDBBuilder::<H>::new(self, self.root())
.with_optional_cache(cache)
.with_optional_recorder(recorder)
.build();
let mut v = Vec::new();
for x in trie.iter()? {
let (key, value) = x?;
v.push((key.to_vec(), value.to_vec()));
}
Ok(v)
})
};
match collect_all() {
Ok(v) => v,
Err(e) => {
debug!(target: "trie", "Error extracting trie values: {}", e);
Vec::new()
},
}
}
/// Returns all keys that start with the given `prefix`.
pub fn keys(&self, prefix: &[u8]) -> Vec<StorageKey> {
let mut keys = Vec::new();
self.for_keys_with_prefix(prefix, |k| keys.push(k.to_vec()));
keys
Ok(RawIter {
stop_on_incomplete_database: args.stop_on_incomplete_database,
child_info: args.child_info,
root,
trie_iter,
state: IterState::Pending,
_phantom: Default::default(),
})
}
/// Return the storage root after applying the given `delta`.
+1
View File
@@ -273,6 +273,7 @@ impl ChildInfo {
/// Returns byte sequence (keyspace) that can be use by underlying db to isolate keys.
/// This is a unique id of the child trie. The collision resistance of this value
/// depends on the type of child info use. For `ChildInfo::Default` it is and need to be.
#[inline]
pub fn keyspace(&self) -> &[u8] {
match self {
ChildInfo::ParentKeyId(..) => self.storage_key(),
+2 -2
View File
@@ -29,7 +29,7 @@ parking_lot = { version = "0.12.1", optional = true }
scale-info = { version = "2.1.1", default-features = false, features = ["derive"] }
thiserror = { version = "1.0.30", optional = true }
tracing = { version = "0.1.29", optional = true }
trie-db = { version = "0.24.0", default-features = false }
trie-db = { version = "0.25.0", default-features = false }
trie-root = { version = "0.17.0", default-features = false }
sp-core = { version = "7.0.0", default-features = false, path = "../core" }
sp-std = { version = "5.0.0", default-features = false, path = "../std" }
@@ -38,7 +38,7 @@ schnellru = { version = "0.2.1", optional = true }
[dev-dependencies]
array-bytes = "4.1"
criterion = "0.4.0"
trie-bench = "0.34.0"
trie-bench = "0.35.0"
trie-standardmap = "0.15.2"
sp-runtime = { version = "7.0.0", path = "../runtime" }
+2 -2
View File
@@ -438,8 +438,8 @@ enum ValueCache<'a, H: Hasher> {
impl<H: Hasher> ValueCache<'_, H> {
/// Get the value for the given `key`.
fn get<'a>(
&'a mut self,
fn get(
&mut self,
key: &[u8],
shared_cache: &SharedTrieCache<H>,
stats: &HitStats,
+2 -1
View File
@@ -51,7 +51,7 @@ pub use trie_db::{
nibble_ops,
node::{NodePlan, ValuePlan},
CError, DBValue, Query, Recorder, Trie, TrieCache, TrieConfiguration, TrieDBIterator,
TrieDBKeyIterator, TrieLayout, TrieMut, TrieRecorder,
TrieDBKeyIterator, TrieDBRawIterator, TrieLayout, TrieMut, TrieRecorder,
};
/// The Substrate format implementation of `TrieStream`.
pub use trie_stream::TrieStream;
@@ -442,6 +442,7 @@ fn keyspace_as_prefix_alloc(ks: &[u8], prefix: Prefix) -> (Vec<u8>, Option<u8>)
impl<'a, DB: ?Sized, H> KeySpacedDB<'a, DB, H> {
/// instantiate new keyspaced db
#[inline]
pub fn new(db: &'a DB, ks: &'a [u8]) -> Self {
KeySpacedDB(db, ks, PhantomData)
}
+2 -1
View File
@@ -83,6 +83,7 @@ impl<H: Hasher> Recorder<H> {
///
/// - `storage_root`: The storage root of the trie for which accesses are recorded. This is
/// important when recording access to different tries at once (like top and child tries).
#[inline]
pub fn as_trie_recorder(
&self,
storage_root: H::Out,
@@ -147,7 +148,7 @@ struct TrieRecorder<H: Hasher, I> {
impl<H: Hasher, I: DerefMut<Target = RecorderInner<H::Out>>> trie_db::TrieRecorder<H::Out>
for TrieRecorder<H, I>
{
fn record<'b>(&mut self, access: TrieAccess<'b, H::Out>) {
fn record(&mut self, access: TrieAccess<H::Out>) {
let mut encoded_size_update = 0;
match access {
+1 -1
View File
@@ -41,7 +41,7 @@ pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "..
sp-finality-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../primitives/finality-grandpa" }
sp-trie = { version = "7.0.0", default-features = false, path = "../../primitives/trie" }
sp-transaction-pool = { version = "4.0.0-dev", default-features = false, path = "../../primitives/transaction-pool" }
trie-db = { version = "0.24.0", default-features = false }
trie-db = { version = "0.25.1", default-features = false }
sc-service = { version = "0.10.0-dev", default-features = false, optional = true, features = ["test-helpers"], path = "../../client/service" }
sp-state-machine = { version = "0.13.0", default-features = false, path = "../../primitives/state-machine" }
sp-externalities = { version = "0.13.0", default-features = false, path = "../../primitives/externalities" }
@@ -20,7 +20,6 @@ use sc_client_api::{Backend as ClientBackend, StorageProvider, UsageProvider};
use sc_client_db::DbHash;
use sc_service::Configuration;
use sp_blockchain::HeaderBackend;
use sp_core::storage::StorageKey;
use sp_database::{ColumnId, Database};
use sp_runtime::traits::{Block as BlockT, HashFor};
use sp_state_machine::Storage;
@@ -192,8 +191,7 @@ impl StorageCmd {
BA: ClientBackend<B>,
{
let hash = client.usage_info().chain.best_hash;
let empty_prefix = StorageKey(Vec::new());
let mut keys = client.storage_keys(hash, &empty_prefix)?;
let mut keys: Vec<_> = client.storage_keys(hash, None, None)?.collect();
let (mut rng, _) = new_rng(None);
keys.shuffle(&mut rng);
@@ -17,7 +17,6 @@
use sc_cli::Result;
use sc_client_api::{Backend as ClientBackend, StorageProvider, UsageProvider};
use sp_core::storage::StorageKey;
use sp_runtime::traits::{Block as BlockT, Header as HeaderT};
use log::info;
@@ -42,8 +41,7 @@ impl StorageCmd {
info!("Preparing keys from block {}", best_hash);
// Load all keys and randomly shuffle them.
let empty_prefix = StorageKey(Vec::new());
let mut keys = client.storage_keys(best_hash, &empty_prefix)?;
let mut keys: Vec<_> = client.storage_keys(best_hash, None, None)?.collect();
let (mut rng, _) = new_rng(None);
keys.shuffle(&mut rng);
@@ -55,8 +53,7 @@ impl StorageCmd {
match (self.params.include_child_trees, self.is_child_key(key.clone().0)) {
(true, Some(info)) => {
// child tree key
let child_keys = client.child_storage_keys(best_hash, &info, &empty_prefix)?;
for ck in child_keys {
for ck in client.child_storage_keys(best_hash, info.clone(), None, None)? {
child_nodes.push((ck.clone(), info.clone()));
}
},
@@ -61,7 +61,7 @@ impl StorageCmd {
info!("Preparing keys from block {}", best_hash);
// Load all KV pairs and randomly shuffle them.
let mut kvs = trie.pairs();
let mut kvs: Vec<_> = trie.pairs(Default::default())?.collect();
let (mut rng, _) = new_rng(None);
kvs.shuffle(&mut rng);
info!("Writing {} keys", kvs.len());
@@ -70,11 +70,12 @@ impl StorageCmd {
// Generate all random values first; Make sure there are no collisions with existing
// db entries, so we can rollback all additions without corrupting existing entries.
for (k, original_v) in kvs {
for key_value in kvs {
let (k, original_v) = key_value?;
match (self.params.include_child_trees, self.is_child_key(k.to_vec())) {
(true, Some(info)) => {
let child_keys =
client.child_storage_keys_iter(best_hash, info.clone(), None, None)?;
client.child_storage_keys(best_hash, info.clone(), None, None)?;
for ck in child_keys {
child_nodes.push((ck.clone(), info.clone()));
}
@@ -21,7 +21,7 @@ log = { version = "0.4.17", default-features = false }
sp-core = { path = "../../../../primitives/core" }
sp-state-machine = { path = "../../../../primitives/state-machine" }
sp-trie = { path = "../../../../primitives/trie" }
trie-db = "0.24.0"
trie-db = "0.25.1"
jsonrpsee = { version = "0.16.2", features = ["client-core", "server", "macros"] }