Further storage iterator refactoring (#13445)

* Remove `Backend::apply_to_key_values_while`

* Add `IterArgs::start_at_exclusive`

* Use `start_at_exclusive` in functions which used `Backend::apply_to_key_values_while`

* Remove `Backend::apply_to_keys_while`

* Remove `for_keys_with_prefix`, `for_key_values_with_prefix` and `for_child_keys_with_prefix`

* Remove unnecessary `to_vec` calls

* Fix unused method warning in no_std

* Remove unnecessary import

* Also check proof sizes in the test

* Iterate over both keys and values in `prove_range_read_with_size` and add a test
This commit is contained in:
Koute
2023-02-27 19:45:42 +09:00
committed by GitHub
parent bb00b262d7
commit 91c595bc78
6 changed files with 226 additions and 370 deletions
+9 -40
View File
@@ -324,7 +324,6 @@ where
{
inner: <State as StateBackend<HashFor<Block>>>::RawIter,
state: State,
skip_if_first: Option<StorageKey>,
}
impl<State, Block> KeysIter<State, Block>
@@ -341,13 +340,9 @@ where
let mut args = IterArgs::default();
args.prefix = prefix.as_ref().map(|prefix| prefix.0.as_slice());
args.start_at = start_at.as_ref().map(|start_at| start_at.0.as_slice());
args.start_at_exclusive = true;
let start_at = args.start_at;
Ok(Self {
inner: state.raw_iter(args)?,
state,
skip_if_first: start_at.map(|key| StorageKey(key.to_vec())),
})
Ok(Self { inner: state.raw_iter(args)?, state })
}
/// Create a new iterator over a child storage's keys.
@@ -361,13 +356,9 @@ where
args.prefix = prefix.as_ref().map(|prefix| prefix.0.as_slice());
args.start_at = start_at.as_ref().map(|start_at| start_at.0.as_slice());
args.child_info = Some(child_info);
args.start_at_exclusive = true;
let start_at = args.start_at;
Ok(Self {
inner: state.raw_iter(args)?,
state,
skip_if_first: start_at.map(|key| StorageKey(key.to_vec())),
})
Ok(Self { inner: state.raw_iter(args)?, state })
}
}
@@ -379,15 +370,7 @@ where
type Item = StorageKey;
fn next(&mut self) -> Option<Self::Item> {
let key = self.inner.next_key(&self.state)?.ok().map(StorageKey)?;
if let Some(skipped_key) = self.skip_if_first.take() {
if key == skipped_key {
return self.next()
}
}
Some(key)
self.inner.next_key(&self.state)?.ok().map(StorageKey)
}
}
@@ -399,7 +382,6 @@ where
{
inner: <State as StateBackend<HashFor<Block>>>::RawIter,
state: State,
skip_if_first: Option<StorageKey>,
}
impl<State, Block> Iterator for PairsIter<State, Block>
@@ -410,19 +392,10 @@ where
type Item = (StorageKey, StorageData);
fn next(&mut self) -> Option<Self::Item> {
let (key, value) = self
.inner
self.inner
.next_pair(&self.state)?
.ok()
.map(|(key, value)| (StorageKey(key), StorageData(value)))?;
if let Some(skipped_key) = self.skip_if_first.take() {
if key == skipped_key {
return self.next()
}
}
Some((key, value))
.map(|(key, value)| (StorageKey(key), StorageData(value)))
}
}
@@ -440,13 +413,9 @@ where
let mut args = IterArgs::default();
args.prefix = prefix.as_ref().map(|prefix| prefix.0.as_slice());
args.start_at = start_at.as_ref().map(|start_at| start_at.0.as_slice());
args.start_at_exclusive = true;
let start_at = args.start_at;
Ok(Self {
inner: state.raw_iter(args)?,
state,
skip_if_first: start_at.map(|key| StorageKey(key.to_vec())),
})
Ok(Self { inner: state.raw_iter(args)?, state })
}
}
+15 -105
View File
@@ -43,6 +43,10 @@ pub struct IterArgs<'a> {
/// This is inclusive and the iteration will include the key which is specified here.
pub start_at: Option<&'a [u8]>,
/// If this is `true` then the iteration will *not* include
/// the key specified in `start_at`, if there is such a key.
pub start_at_exclusive: bool,
/// The info of the child trie over which to iterate.
pub child_info: Option<ChildInfo>,
@@ -117,6 +121,17 @@ where
}
}
impl<'a, H, I> PairsIter<'a, H, I>
where
H: Hasher,
I: StorageIterator<H> + Default,
{
#[cfg(feature = "std")]
pub(crate) fn was_complete(&self) -> bool {
self.raw_iter.was_complete()
}
}
/// An iterator over storage keys.
pub struct KeysIter<'a, H, I>
where
@@ -214,111 +229,6 @@ pub trait Backend<H: Hasher>: sp_std::fmt::Debug {
key: &[u8],
) -> Result<Option<StorageKey>, Self::Error>;
/// Iterate over storage starting at key, for a given prefix and child trie.
/// Aborts as soon as `f` returns false.
/// Warning, this fails at first error when usual iteration skips errors.
/// If `allow_missing` is true, iteration stops when it reaches a missing trie node.
/// Otherwise an error is produced.
///
/// Returns `true` if trie end is reached.
// TODO: Remove this.
fn apply_to_key_values_while<F: FnMut(Vec<u8>, Vec<u8>) -> bool>(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
mut f: F,
allow_missing: bool,
) -> Result<bool, Self::Error> {
let args = IterArgs {
child_info: child_info.cloned(),
prefix,
start_at,
stop_on_incomplete_database: allow_missing,
..IterArgs::default()
};
let mut iter = self.pairs(args)?;
while let Some(key_value) = iter.next() {
let (key, value) = key_value?;
if !f(key, value) {
return Ok(false)
}
}
Ok(iter.raw_iter.was_complete())
}
/// Retrieve all entry keys of storage and call `f` for each of those keys.
/// Aborts as soon as `f` returns false.
// TODO: Remove this.
fn apply_to_keys_while<F: FnMut(&[u8]) -> bool>(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
mut f: F,
) -> Result<(), Self::Error> {
let args =
IterArgs { child_info: child_info.cloned(), prefix, start_at, ..IterArgs::default() };
for key in self.keys(args)? {
if !f(&key?) {
return Ok(())
}
}
Ok(())
}
/// Retrieve all entry keys which start with the given prefix and
/// call `f` for each of those keys.
// TODO: Remove this.
fn for_keys_with_prefix<F: FnMut(&[u8])>(
&self,
prefix: &[u8],
mut f: F,
) -> Result<(), Self::Error> {
let args = IterArgs { prefix: Some(prefix), ..IterArgs::default() };
self.keys(args)?.try_for_each(|key| {
f(&key?);
Ok(())
})
}
/// Retrieve all entry keys and values which start with the given prefix and
/// call `f` for each of those key/value pairs.
// TODO: Remove this.
fn for_key_values_with_prefix<F: FnMut(&[u8], &[u8])>(
&self,
prefix: &[u8],
mut f: F,
) -> Result<(), Self::Error> {
let args = IterArgs { prefix: Some(prefix), ..IterArgs::default() };
self.pairs(args)?.try_for_each(|key_value| {
let (key, value) = key_value?;
f(&key, &value);
Ok(())
})
}
/// Retrieve all child entry keys which start with the given prefix and
/// call `f` for each of those keys.
// TODO: Remove this.
fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
&self,
child_info: &ChildInfo,
prefix: &[u8],
mut f: F,
) -> Result<(), Self::Error> {
let args = IterArgs {
child_info: Some(child_info.clone()),
prefix: Some(prefix),
..IterArgs::default()
};
self.keys(args)?.try_for_each(|key| {
f(&key?);
Ok(())
})
}
/// Calculate the storage root, with given delta over what is already stored in
/// the backend, and produce a "transaction" that can be used to commit.
/// Does not include child storage updates.
+45 -29
View File
@@ -19,7 +19,9 @@
#[cfg(feature = "std")]
use crate::overlayed_changes::OverlayedExtensions;
use crate::{backend::Backend, IndexOperation, OverlayedChanges, StorageKey, StorageValue};
use crate::{
backend::Backend, IndexOperation, IterArgs, OverlayedChanges, StorageKey, StorageValue,
};
use codec::{Decode, Encode, EncodeAppend};
use hash_db::Hasher;
#[cfg(feature = "std")]
@@ -750,40 +752,54 @@ where
{
fn limit_remove_from_backend(
&mut self,
maybe_child: Option<&ChildInfo>,
maybe_prefix: Option<&[u8]>,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
maybe_limit: Option<u32>,
maybe_cursor: Option<&[u8]>,
start_at: Option<&[u8]>,
) -> (Option<Vec<u8>>, u32, u32) {
let iter = match self.backend.keys(IterArgs {
child_info: child_info.cloned(),
prefix,
start_at,
..IterArgs::default()
}) {
Ok(iter) => iter,
Err(error) => {
log::debug!(target: "trie", "Error while iterating the storage: {}", error);
return (None, 0, 0)
},
};
let mut delete_count: u32 = 0;
let mut loop_count: u32 = 0;
let mut maybe_next_key = None;
let result =
self.backend
.apply_to_keys_while(maybe_child, maybe_prefix, maybe_cursor, |key| {
if maybe_limit.map_or(false, |limit| loop_count == limit) {
maybe_next_key = Some(key.to_vec());
return false
}
let overlay = match maybe_child {
Some(child_info) => self.overlay.child_storage(child_info, key),
None => self.overlay.storage(key),
};
if !matches!(overlay, Some(None)) {
// not pending deletion from the backend - delete it.
if let Some(child_info) = maybe_child {
self.overlay.set_child_storage(child_info, key.to_vec(), None);
} else {
self.overlay.set_storage(key.to_vec(), None);
}
delete_count = delete_count.saturating_add(1);
}
loop_count = loop_count.saturating_add(1);
true
});
for key in iter {
let key = match key {
Ok(key) => key,
Err(error) => {
log::debug!(target: "trie", "Error while iterating the storage: {}", error);
break
},
};
if let Err(error) = result {
log::debug!(target: "trie", "Error while iterating the storage: {}", error);
if maybe_limit.map_or(false, |limit| loop_count == limit) {
maybe_next_key = Some(key);
break
}
let overlay = match child_info {
Some(child_info) => self.overlay.child_storage(child_info, &key),
None => self.overlay.storage(&key),
};
if !matches!(overlay, Some(None)) {
// not pending deletion from the backend - delete it.
if let Some(child_info) = child_info {
self.overlay.set_child_storage(child_info, key, None);
} else {
self.overlay.set_storage(key, None);
}
delete_count = delete_count.saturating_add(1);
}
loop_count = loop_count.saturating_add(1);
}
(maybe_next_key, delete_count, loop_count)
+125 -105
View File
@@ -849,48 +849,37 @@ mod execution {
let start_at_ref = start_at.as_ref().map(AsRef::as_ref);
let mut switch_child_key = None;
let mut first = start_at.is_some();
let completed = proving_backend
.apply_to_key_values_while(
child_info.as_ref(),
None,
start_at_ref,
|key, value| {
if first &&
start_at_ref
.as_ref()
.map(|start| &key.as_slice() > start)
.unwrap_or(true)
{
first = false;
}
if first {
true
} else if depth < MAX_NESTED_TRIE_DEPTH &&
sp_core::storage::well_known_keys::is_child_storage_key(
key.as_slice(),
) {
count += 1;
if !child_roots.contains(value.as_slice()) {
child_roots.insert(value);
switch_child_key = Some(key);
false
} else {
// do not add two child tries with the same root
true
}
} else if recorder.estimate_encoded_size() <= size_limit {
count += 1;
true
} else {
false
}
},
false,
)
let mut iter = proving_backend
.pairs(IterArgs {
child_info,
start_at: start_at_ref,
start_at_exclusive: true,
..IterArgs::default()
})
.map_err(|e| Box::new(e) as Box<dyn Error>)?;
while let Some(item) = iter.next() {
let (key, value) = item.map_err(|e| Box::new(e) as Box<dyn Error>)?;
if depth < MAX_NESTED_TRIE_DEPTH &&
sp_core::storage::well_known_keys::is_child_storage_key(key.as_slice())
{
count += 1;
// do not add two child tries with the same root
if !child_roots.contains(value.as_slice()) {
child_roots.insert(value);
switch_child_key = Some(key);
break
}
} else if recorder.estimate_encoded_size() <= size_limit {
count += 1;
} else {
break
}
}
let completed = iter.was_complete();
if switch_child_key.is_none() {
if depth == 1 {
break
@@ -951,23 +940,27 @@ mod execution {
let proving_backend =
TrieBackendBuilder::wrap(trie_backend).with_recorder(recorder.clone()).build();
let mut count = 0;
proving_backend
.apply_to_key_values_while(
child_info,
let iter = proving_backend
// NOTE: Even though the loop below doesn't use these values
// this *must* fetch both the keys and the values so that
// the proof is correct.
.pairs(IterArgs {
child_info: child_info.cloned(),
prefix,
start_at,
|_key, _value| {
if count == 0 || recorder.estimate_encoded_size() <= size_limit {
count += 1;
true
} else {
false
}
},
false,
)
..IterArgs::default()
})
.map_err(|e| Box::new(e) as Box<dyn Error>)?;
for item in iter {
item.map_err(|e| Box::new(e) as Box<dyn Error>)?;
if count == 0 || recorder.estimate_encoded_size() <= size_limit {
count += 1;
} else {
break
}
}
let proof = proving_backend
.extract_proof()
.expect("A recorder was set and thus, a storage proof can be extracted; qed");
@@ -1173,20 +1166,25 @@ mod execution {
H::Out: Ord + Codec,
{
let mut values = Vec::new();
let result = proving_backend.apply_to_key_values_while(
child_info,
prefix,
start_at,
|key, value| {
values.push((key.to_vec(), value.to_vec()));
count.as_ref().map_or(true, |c| (values.len() as u32) < *c)
},
true,
);
match result {
Ok(completed) => Ok((values, completed)),
Err(e) => Err(Box::new(e) as Box<dyn Error>),
let mut iter = proving_backend
.pairs(IterArgs {
child_info: child_info.cloned(),
prefix,
start_at,
stop_on_incomplete_database: true,
..IterArgs::default()
})
.map_err(|e| Box::new(e) as Box<dyn Error>)?;
while let Some(item) = iter.next() {
let (key, value) = item.map_err(|e| Box::new(e) as Box<dyn Error>)?;
values.push((key, value));
if !count.as_ref().map_or(true, |c| (values.len() as u32) < *c) {
break
}
}
Ok((values, iter.was_complete()))
}
/// Check storage range proof on pre-created proving backend.
@@ -1255,47 +1253,35 @@ mod execution {
};
let start_at_ref = start_at.as_ref().map(AsRef::as_ref);
let mut switch_child_key = None;
let mut first = start_at.is_some();
let completed = proving_backend
.apply_to_key_values_while(
child_info.as_ref(),
None,
start_at_ref,
|key, value| {
if first &&
start_at_ref
.as_ref()
.map(|start| &key.as_slice() > start)
.unwrap_or(true)
{
first = false;
}
if !first {
values.push((key.to_vec(), value.to_vec()));
}
if first {
true
} else if depth < MAX_NESTED_TRIE_DEPTH &&
sp_core::storage::well_known_keys::is_child_storage_key(
key.as_slice(),
) {
if child_roots.contains(value.as_slice()) {
// Do not add two child tries with the same root.
true
} else {
child_roots.insert(value.clone());
switch_child_key = Some((key, value));
false
}
} else {
true
}
},
true,
)
let mut iter = proving_backend
.pairs(IterArgs {
child_info,
start_at: start_at_ref,
start_at_exclusive: true,
stop_on_incomplete_database: true,
..IterArgs::default()
})
.map_err(|e| Box::new(e) as Box<dyn Error>)?;
while let Some(item) = iter.next() {
let (key, value) = item.map_err(|e| Box::new(e) as Box<dyn Error>)?;
values.push((key.to_vec(), value.to_vec()));
if depth < MAX_NESTED_TRIE_DEPTH &&
sp_core::storage::well_known_keys::is_child_storage_key(key.as_slice())
{
// Do not add two child tries with the same root.
if !child_roots.contains(value.as_slice()) {
child_roots.insert(value.clone());
switch_child_key = Some((key, value));
break
}
}
}
let completed = iter.was_complete();
if switch_child_key.is_none() {
if !completed {
break depth
@@ -1325,9 +1311,13 @@ mod tests {
storage::{ChildInfo, StateVersion},
testing::TaskExecutor,
traits::{CallContext, CodeExecutor, Externalities, RuntimeCode},
H256,
};
use sp_runtime::traits::BlakeTwo256;
use sp_trie::trie_types::{TrieDBMutBuilderV0, TrieDBMutBuilderV1};
use sp_trie::{
trie_types::{TrieDBMutBuilderV0, TrieDBMutBuilderV1},
KeySpacedDBMut, PrefixedMemoryDB,
};
use std::collections::{BTreeMap, HashMap};
#[derive(Clone)]
@@ -1953,12 +1943,14 @@ mod tests {
// Always contains at least some nodes.
assert_eq!(proof.to_memory_db::<BlakeTwo256>().drain().len(), 3);
assert_eq!(count, 1);
assert_eq!(proof.encoded_size(), 443);
let remote_backend = trie_backend::tests::test_trie(state_version, None, None);
let (proof, count) =
prove_range_read_with_size(remote_backend, None, None, 800, Some(&[])).unwrap();
assert_eq!(proof.to_memory_db::<BlakeTwo256>().drain().len(), 9);
assert_eq!(count, 85);
assert_eq!(proof.encoded_size(), 857);
let (results, completed) = read_range_proof_check::<BlakeTwo256>(
remote_root,
proof.clone(),
@@ -1982,6 +1974,8 @@ mod tests {
prove_range_read_with_size(remote_backend, None, None, 50000, Some(&[])).unwrap();
assert_eq!(proof.to_memory_db::<BlakeTwo256>().drain().len(), 11);
assert_eq!(count, 132);
assert_eq!(proof.encoded_size(), 990);
let (results, completed) =
read_range_proof_check::<BlakeTwo256>(remote_root, proof, None, None, None, None)
.unwrap();
@@ -1989,6 +1983,32 @@ mod tests {
assert_eq!(completed, true);
}
#[test]
fn prove_read_with_size_limit_proof_size() {
let mut root = H256::default();
let mut mdb = PrefixedMemoryDB::<BlakeTwo256>::default();
{
let mut mdb = KeySpacedDBMut::new(&mut mdb, b"");
let mut trie = TrieDBMutBuilderV1::new(&mut mdb, &mut root).build();
trie.insert(b"value1", &[123; 1]).unwrap();
trie.insert(b"value2", &[123; 10]).unwrap();
trie.insert(b"value3", &[123; 100]).unwrap();
trie.insert(b"value4", &[123; 1000]).unwrap();
}
let remote_backend: TrieBackend<PrefixedMemoryDB<BlakeTwo256>, BlakeTwo256> =
TrieBackendBuilder::new(mdb, root)
.with_optional_cache(None)
.with_optional_recorder(None)
.build();
let (proof, count) =
prove_range_read_with_size(remote_backend, None, None, 1000, None).unwrap();
assert_eq!(proof.encoded_size(), 1267);
assert_eq!(count, 3);
}
#[test]
fn inner_state_versioning_switch_proofs() {
let mut state_version = StateVersion::V0;
@@ -357,7 +357,7 @@ pub mod tests {
trie_types::{TrieDBBuilder, TrieDBMutBuilderV0, TrieDBMutBuilderV1},
KeySpacedDBMut, PrefixedKey, PrefixedMemoryDB, Trie, TrieCache, TrieMut,
};
use std::{collections::HashSet, iter};
use std::iter;
use trie_db::NodeCodec;
const CHILD_KEY_1: &[u8] = b"sub1";
@@ -643,73 +643,6 @@ pub mod tests {
.collect::<Vec<_>>(),
vec![b"value1".to_vec(), b"value2".to_vec(),]
);
// Also test out the wrapper methods.
// TODO: Remove this once these methods are gone.
let mut list = Vec::new();
assert!(trie
.apply_to_key_values_while(
None,
None,
Some(b"key"),
|key, _| {
list.push(key);
true
},
false
)
.unwrap());
assert_eq!(list[0..3], vec![b"key".to_vec(), b"value1".to_vec(), b"value2".to_vec(),]);
let mut list = Vec::new();
trie.apply_to_keys_while(None, None, Some(b"key"), |key| {
list.push(key.to_vec());
true
})
.unwrap();
assert_eq!(list[0..3], vec![b"key".to_vec(), b"value1".to_vec(), b"value2".to_vec(),]);
let mut list = Vec::new();
trie.apply_to_keys_while(None, None, Some(b"k"), |key| {
list.push(key.to_vec());
true
})
.unwrap();
assert_eq!(list[0..3], vec![b"key".to_vec(), b"value1".to_vec(), b"value2".to_vec(),]);
let mut list = Vec::new();
trie.apply_to_keys_while(None, None, Some(b""), |key| {
list.push(key.to_vec());
true
})
.unwrap();
assert_eq!(
list[0..5],
vec![
b":child_storage:default:sub1".to_vec(),
b":code".to_vec(),
b"key".to_vec(),
b"value1".to_vec(),
b"value2".to_vec(),
]
);
let mut list = Vec::new();
trie.apply_to_keys_while(None, Some(b"value"), Some(b"key"), |key| {
list.push(key.to_vec());
true
})
.unwrap();
assert!(list.is_empty());
let mut list = Vec::new();
trie.apply_to_keys_while(None, Some(b"value"), Some(b"value"), |key| {
list.push(key.to_vec());
true
})
.unwrap();
assert_eq!(list, vec![b"value1".to_vec(), b"value2".to_vec(),]);
}
parameterized_test!(storage_root_is_non_default, storage_root_is_non_default_inner);
@@ -745,27 +678,6 @@ pub mod tests {
);
}
parameterized_test!(prefix_walking_works, prefix_walking_works_inner);
fn prefix_walking_works_inner(
state_version: StateVersion,
cache: Option<Cache>,
recorder: Option<Recorder>,
) {
let trie = test_trie(state_version, cache, recorder);
let mut seen = HashSet::new();
trie.for_keys_with_prefix(b"value", |key| {
let for_first_time = seen.insert(key.to_vec());
assert!(for_first_time, "Seen key '{:?}' more than once", key);
})
.unwrap();
let mut expected = HashSet::new();
expected.insert(b"value1".to_vec());
expected.insert(b"value2".to_vec());
assert_eq!(seen, expected);
}
parameterized_test!(
keys_with_empty_prefix_returns_all_keys,
keys_with_empty_prefix_returns_all_keys_inner
@@ -87,6 +87,7 @@ where
H: Hasher,
{
stop_on_incomplete_database: bool,
skip_if_first: Option<StorageKey>,
root: H::Out,
child_info: Option<ChildInfo>,
trie_iter: TrieDBRawIterator<Layout<H>>,
@@ -144,6 +145,7 @@ where
fn default() -> Self {
Self {
stop_on_incomplete_database: false,
skip_if_first: None,
child_info: None,
root: Default::default(),
trie_iter: TrieDBRawIterator::empty(),
@@ -165,12 +167,34 @@ where
#[inline]
fn next_key(&mut self, backend: &Self::Backend) -> Option<Result<StorageKey>> {
self.prepare(&backend.essence, |trie, trie_iter| trie_iter.next_key(&trie))
let skip_if_first = self.skip_if_first.take();
self.prepare(&backend.essence, |trie, trie_iter| {
let mut result = trie_iter.next_key(&trie);
if let Some(skipped_key) = skip_if_first {
if let Some(Ok(ref key)) = result {
if *key == skipped_key {
result = trie_iter.next_key(&trie);
}
}
}
result
})
}
#[inline]
fn next_pair(&mut self, backend: &Self::Backend) -> Option<Result<(StorageKey, StorageValue)>> {
self.prepare(&backend.essence, |trie, trie_iter| trie_iter.next_item(&trie))
let skip_if_first = self.skip_if_first.take();
self.prepare(&backend.essence, |trie, trie_iter| {
let mut result = trie_iter.next_item(&trie);
if let Some(skipped_key) = skip_if_first {
if let Some(Ok((ref key, _))) = result {
if *key == skipped_key {
result = trie_iter.next_item(&trie);
}
}
}
result
})
}
fn was_complete(&self) -> bool {
@@ -574,6 +598,11 @@ where
Ok(RawIter {
stop_on_incomplete_database: args.stop_on_incomplete_database,
skip_if_first: if args.start_at_exclusive {
args.start_at.map(|key| key.to_vec())
} else {
None
},
child_info: args.child_info,
root,
trie_iter,