* State sync

* Importing state fixes

* Bugfixes

* Sync with proof

* Status reporting

* Unsafe sync mode

* Sync test

* Cleanup

* Apply suggestions from code review

Co-authored-by: cheme <emericchevalier.pro@gmail.com>
Co-authored-by: Pierre Krieger <pierre.krieger1708@gmail.com>

* set_genesis_storage

* Extract keys from range proof

* Detect iter completion

* Download and import bodies with fast sync

* Replaced meta updates tuple with a struct

* Fixed reverting finalized state

* Reverted timeout

* Typo

* Doc

* Doc

* Fixed light client test

* Fixed error handling

* Tweaks

* More UpdateMeta changes

* Rename convert_transaction

* Apply suggestions from code review

Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>

* Apply suggestions from code review

Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>

* Code review suggestions

* Fixed count handling

Co-authored-by: cheme <emericchevalier.pro@gmail.com>
Co-authored-by: Pierre Krieger <pierre.krieger1708@gmail.com>
Co-authored-by: Bastian Köcher <bkchr@users.noreply.github.com>
This commit is contained in:
Arkadiy Paronyan
2021-06-22 11:32:43 +02:00
committed by GitHub
parent 5899eedc8c
commit 77a4b980ae
54 changed files with 2128 additions and 379 deletions
@@ -93,6 +93,22 @@ pub trait Backend<H: Hasher>: sp_std::fmt::Debug {
key: &[u8]
) -> Result<Option<StorageKey>, Self::Error>;
/// Iterate over storage starting at key, for a given prefix and child trie.
/// Aborts as soon as `f` returns false.
/// Warning, this fails at first error when usual iteration skips errors.
/// If `allow_missing` is true, iteration stops when it reaches a missing trie node.
/// Otherwise an error is produced.
///
/// Returns `true` if trie end is reached.
fn apply_to_key_values_while<F: FnMut(Vec<u8>, Vec<u8>) -> bool>(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
allow_missing: bool,
) -> Result<bool, Self::Error>;
/// Retrieve all entries keys of storage and call `f` for each of those keys.
/// Aborts as soon as `f` returns false.
fn apply_to_keys_while<F: FnMut(&[u8]) -> bool>(
+145 -1
View File
@@ -726,6 +726,50 @@ mod execution {
prove_read_on_trie_backend(trie_backend, keys)
}
/// Generate range storage read proof.
///
/// Records `key => value` entries under `prefix` (optionally inside the child
/// trie identified by `child_info`), starting at `start_at`, until the recorded
/// proof reaches `size_limit` bytes — the first entry is always recorded, see
/// `prove_range_read_with_size_on_trie_backend`.
///
/// Returns the proof together with the number of entries it covers.
///
/// Errors when the backend cannot be converted into a trie backend.
pub fn prove_range_read_with_size<B, H>(
mut backend: B,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
size_limit: usize,
start_at: Option<&[u8]>,
) -> Result<(StorageProof, u32), Box<dyn Error>>
where
B: Backend<H>,
H: Hasher,
H::Out: Ord + Codec,
{
// `as_trie_backend` yields `None` for backends without a trie representation.
let trie_backend = backend.as_trie_backend()
	.ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box<dyn Error>)?;
prove_range_read_with_size_on_trie_backend(trie_backend, child_info, prefix, size_limit, start_at)
}
/// Generate range storage read proof on an existing trie backend.
///
/// Entries are visited in key order starting at `start_at` within `prefix`
/// (and within the child trie given by `child_info`, if any). The first entry
/// is always recorded; after that, recording stops as soon as the estimated
/// encoded proof size exceeds `size_limit`, so the resulting proof may slightly
/// overshoot the limit.
///
/// Returns the proof and the number of entries it covers.
pub fn prove_range_read_with_size_on_trie_backend<S, H>(
trie_backend: &TrieBackend<S, H>,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
size_limit: usize,
start_at: Option<&[u8]>,
) -> Result<(StorageProof, u32), Box<dyn Error>>
where
S: trie_backend_essence::TrieBackendStorage<H>,
H: Hasher,
H::Out: Ord + Codec,
{
let proving_backend = proving_backend::ProvingBackend::<S, H>::new(trie_backend);
let mut count = 0;
// `allow_missing = false`: the local backend is expected to be complete,
// so a missing trie node here is a real error.
proving_backend.apply_to_key_values_while(child_info, prefix, start_at, |_key, _value| {
	// Always record at least one entry (`count == 0`), then keep going
	// while the estimated proof size stays within `size_limit`.
	if count == 0 || proving_backend.estimate_encoded_size() <= size_limit {
		count += 1;
		true
	} else {
		false
	}
}, false).map_err(|e| Box::new(e) as Box<dyn Error>)?;
Ok((proving_backend.extract_proof(), count))
}
/// Generate child storage read proof.
pub fn prove_child_read<B, H, I>(
mut backend: B,
@@ -808,6 +852,29 @@ mod execution {
Ok(result)
}
/// Check storage range proof, generated by a `prove_range_read_with_size` call.
///
/// `count`, when given, limits how many `key => value` pairs are read back
/// out of the proof.
///
/// Returns the decoded pairs together with a `bool` that is `true` when the
/// iteration completed (no further entries remain after the last one read).
pub fn read_range_proof_check<H>(
root: H::Out,
proof: StorageProof,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
count: Option<u32>,
start_at: Option<&[u8]>,
) -> Result<(Vec<(Vec<u8>, Vec<u8>)>, bool), Box<dyn Error>>
where
H: Hasher,
H::Out: Ord + Codec,
{
// Rebuild an in-memory trie backend from the proof nodes, then delegate
// the actual range walk to the proving-backend variant.
let proving_backend = create_proof_check_backend::<H>(root, proof)?;
read_range_proof_check_on_proving_backend(
	&proving_backend,
	child_info,
	prefix,
	count,
	start_at,
)
}
/// Check child storage read proof, generated by `prove_child_read` call.
pub fn read_child_proof_check<H, I>(
root: H::Out,
@@ -859,6 +926,32 @@ mod execution {
proving_backend.child_storage(child_info, key)
.map_err(|e| Box::new(e) as Box<dyn Error>)
}
/// Check storage range proof on pre-created proving backend.
///
/// Returns a vector with the read `key => value` pairs and a `bool` that is set to `true` when
/// all `key => value` pairs could be read and no more are left.
pub fn read_range_proof_check_on_proving_backend<H>(
proving_backend: &TrieBackend<MemoryDB<H>, H>,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
count: Option<u32>,
start_at: Option<&[u8]>,
) -> Result<(Vec<(Vec<u8>, Vec<u8>)>, bool), Box<dyn Error>>
where
H: Hasher,
H::Out: Ord + Codec,
{
let mut values = Vec::new();
// `allow_missing = true`: the proof only covers a range of the trie, so
// reaching a missing node just marks the end of the proven range instead
// of being an error.
let result = proving_backend.apply_to_key_values_while(child_info, prefix, start_at, |key, value| {
	values.push((key.to_vec(), value.to_vec()));
	// Keep iterating until `count` entries were collected (when given).
	count.as_ref().map_or(true, |c| (values.len() as u32) < *c)
}, true);
match result {
	Ok(completed) => Ok((values, completed)),
	Err(e) => Err(Box::new(e) as Box<dyn Error>),
}
}
}
#[cfg(test)]
@@ -1457,7 +1550,7 @@ mod tests {
remote_proof.clone(),
&[&[0xff]],
).is_ok();
// check that results are correct
// check that results are correct
assert_eq!(
local_result1.into_iter().collect::<Vec<_>>(),
vec![(b"value2".to_vec(), Some(vec![24]))],
@@ -1494,6 +1587,57 @@ mod tests {
);
}
#[test]
fn prove_read_with_size_limit_works() {
let remote_backend = trie_backend::tests::test_trie();
let remote_root = remote_backend.storage_root(::std::iter::empty()).0;
// A zero size limit still records exactly one entry, because the first
// entry is always included regardless of the limit.
let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 0, None).unwrap();
// Always contains at least some nodes.
assert_eq!(proof.into_memory_db::<BlakeTwo256>().drain().len(), 3);
assert_eq!(count, 1);
// Mid-sized limit: only part of the trie fits in the proof.
let remote_backend = trie_backend::tests::test_trie();
let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 800, Some(&[])).unwrap();
assert_eq!(proof.clone().into_memory_db::<BlakeTwo256>().drain().len(), 9);
assert_eq!(count, 85);
// Reading back with the prover-reported count yields exactly that many
// pairs, and `completed` is false since the trie was not exhausted.
let (results, completed) = read_range_proof_check::<BlakeTwo256>(
	remote_root,
	proof.clone(),
	None,
	None,
	Some(count),
	None,
).unwrap();
assert_eq!(results.len() as u32, count);
assert_eq!(completed, false);
// When checking without count limit, proof may actually contain extra values.
let (results, completed) = read_range_proof_check::<BlakeTwo256>(
	remote_root,
	proof,
	None,
	None,
	None,
	None,
).unwrap();
assert_eq!(results.len() as u32, 101);
assert_eq!(completed, false);
// Large enough limit: the whole trie fits, so the read-back completes.
let remote_backend = trie_backend::tests::test_trie();
let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 50000, Some(&[])).unwrap();
assert_eq!(proof.clone().into_memory_db::<BlakeTwo256>().drain().len(), 11);
assert_eq!(count, 132);
let (results, completed) = read_range_proof_check::<BlakeTwo256>(
	remote_root,
	proof.clone(),
	None,
	None,
	None,
	None,
).unwrap();
assert_eq!(results.len() as u32, count);
assert_eq!(completed, true);
}
#[test]
fn compact_multiple_child_trie() {
// this root will be queried
@@ -303,7 +303,7 @@ impl OverlayedChanges {
/// Set a new value for the specified key.
///
/// Can be rolled back or committed when called inside a transaction.
pub(crate) fn set_storage(&mut self, key: StorageKey, val: Option<StorageValue>) {
pub fn set_storage(&mut self, key: StorageKey, val: Option<StorageValue>) {
let size_write = val.as_ref().map(|x| x.len() as u64).unwrap_or(0);
self.stats.tally_write_overlay(size_write);
self.top.set(key, val, self.extrinsic_index());
@@ -212,6 +212,14 @@ impl<'a, S: 'a + TrieBackendStorage<H>, H: 'a + Hasher> ProvingBackend<'a, S, H>
/// Consolidate the nodes captured by the proof recorder into a `StorageProof`.
pub fn extract_proof(&self) -> StorageProof {
self.0.essence().backend_storage().proof_recorder.to_storage_proof()
}
/// Returns the estimated encoded size of the proof.
///
/// The estimate may be bigger (by at most 4 bytes), but never smaller than the actual
/// encoded proof.
pub fn estimate_encoded_size(&self) -> usize {
self.0.essence().backend_storage().proof_recorder.estimate_encoded_size()
}
}
impl<'a, S: 'a + TrieBackendStorage<H>, H: 'a + Hasher> TrieBackendStorage<H>
@@ -260,6 +268,17 @@ impl<'a, S, H> Backend<H> for ProvingBackend<'a, S, H>
self.0.child_storage(child_info, key)
}
// Delegate to the inner trie backend; see the `Backend` trait for the
// contract of this method.
fn apply_to_key_values_while<F: FnMut(Vec<u8>, Vec<u8>) -> bool>(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
allow_missing: bool,
) -> Result<bool, Self::Error> {
self.0.apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing)
}
fn apply_to_keys_while<F: FnMut(&[u8]) -> bool>(
&self,
child_info: Option<&ChildInfo>,
@@ -113,6 +113,17 @@ impl<S: TrieBackendStorage<H>, H: Hasher> Backend<H> for TrieBackend<S, H> where
self.essence.for_key_values_with_prefix(prefix, f)
}
// Forward to the backend essence, which performs the actual trie iteration.
fn apply_to_key_values_while<F: FnMut(Vec<u8>, Vec<u8>) -> bool>(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: F,
allow_missing: bool,
) -> Result<bool, Self::Error> {
self.essence.apply_to_key_values_while(child_info, prefix, start_at, f, allow_missing)
}
fn apply_to_keys_while<F: FnMut(&[u8]) -> bool>(
&self,
child_info: Option<&ChildInfo>,
@@ -189,6 +189,43 @@ impl<S: TrieBackendStorage<H>, H: Hasher> TrieBackendEssence<S, H> where H::Out:
.map_err(map_e)
}
/// Iterate over storage `key => value` entries and call `f` for each of them.
/// Aborts as soon as `f` returns false.
///
/// When `allow_missing_nodes` is true, reaching a missing trie node ends the
/// iteration instead of producing an error.
///
/// Returns `true` when all entries were iterated.
pub fn apply_to_key_values_while(
&self,
child_info: Option<&ChildInfo>,
prefix: Option<&[u8]>,
start_at: Option<&[u8]>,
f: impl FnMut(Vec<u8>, Vec<u8>) -> bool,
allow_missing_nodes: bool,
) -> Result<bool> {
// Resolve which trie root to walk: the child trie root when `child_info`
// is given, the top-level root otherwise.
let mut child_root;
let root = if let Some(child_info) = child_info.as_ref() {
	if let Some(fetched_child_root) = self.child_root(child_info)? {
		child_root = H::Out::default();
		// root is fetched from DB, not writable by runtime, so it's always valid.
		child_root.as_mut().copy_from_slice(fetched_child_root.as_slice());
		&child_root
	} else {
		// The child trie does not exist: nothing to iterate, report the
		// (empty) iteration as complete.
		return Ok(true);
	}
} else {
	&self.root
};
self.trie_iter_inner(
	&root,
	prefix,
	f,
	child_info,
	start_at,
	allow_missing_nodes,
)
}
/// Retrieve all entries keys of a storage and call `f` for each of those keys.
/// Aborts as soon as `f` returns false.
pub fn apply_to_keys_while<F: FnMut(&[u8]) -> bool>(
@@ -212,15 +249,15 @@ impl<S: TrieBackendStorage<H>, H: Hasher> TrieBackendEssence<S, H> where H::Out:
&self.root
};
self.trie_iter_inner(root, prefix, |k, _v| f(k), child_info)
let _ = self.trie_iter_inner(root, prefix, |k, _v| { f(&k); true}, child_info, None, false);
}
/// Execute given closure for all keys starting with prefix.
pub fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
pub fn for_child_keys_with_prefix(
&self,
child_info: &ChildInfo,
prefix: &[u8],
mut f: F,
mut f: impl FnMut(&[u8]),
) {
let root_vec = match self.child_root(child_info) {
Ok(v) => v.unwrap_or_else(|| empty_child_trie_root::<Layout<H>>().encode()),
@@ -231,41 +268,43 @@ impl<S: TrieBackendStorage<H>, H: Hasher> TrieBackendEssence<S, H> where H::Out:
};
let mut root = H::Out::default();
root.as_mut().copy_from_slice(&root_vec);
self.trie_iter_inner(&root, Some(prefix), |k, _v| { f(k); true }, Some(child_info))
let _ = self.trie_iter_inner(&root, Some(prefix), |k, _v| { f(&k); true }, Some(child_info), None, false);
}
/// Execute given closure for all keys starting with prefix.
pub fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], mut f: F) {
self.trie_iter_inner(&self.root, Some(prefix), |k, _v| { f(k); true }, None)
let _ = self.trie_iter_inner(&self.root, Some(prefix), |k, _v| { f(&k); true }, None, None, false);
}
fn trie_iter_inner<F: FnMut(&[u8], &[u8]) -> bool>(
fn trie_iter_inner<F: FnMut(Vec<u8>, Vec<u8>) -> bool>(
&self,
root: &H::Out,
prefix: Option<&[u8]>,
mut f: F,
child_info: Option<&ChildInfo>,
) {
let mut iter = move |db| -> sp_std::result::Result<(), Box<TrieError<H::Out>>> {
start_at: Option<&[u8]>,
allow_missing_nodes: bool,
) -> Result<bool> {
let mut iter = move |db| -> sp_std::result::Result<bool, Box<TrieError<H::Out>>> {
let trie = TrieDB::<H>::new(db, root)?;
let iter = if let Some(prefix) = prefix.as_ref() {
TrieDBIterator::new_prefixed(&trie, prefix)?
let prefix = prefix.unwrap_or(&[]);
let iterator = if let Some(start_at) = start_at {
TrieDBIterator::new_prefixed_then_seek(&trie, prefix, start_at)?
} else {
TrieDBIterator::new(&trie)?
TrieDBIterator::new_prefixed(&trie, prefix)?
};
for x in iter {
for x in iterator {
let (key, value) = x?;
debug_assert!(prefix.as_ref().map(|prefix| key.starts_with(prefix)).unwrap_or(true));
debug_assert!(key.starts_with(prefix));
if !f(&key, &value) {
break;
if !f(key, value) {
return Ok(false)
}
}
Ok(())
Ok(true)
};
let result = if let Some(child_info) = child_info {
@@ -274,14 +313,16 @@ impl<S: TrieBackendStorage<H>, H: Hasher> TrieBackendEssence<S, H> where H::Out:
} else {
iter(self)
};
if let Err(e) = result {
debug!(target: "trie", "Error while iterating by prefix: {}", e);
match result {
Ok(completed) => Ok(completed),
Err(e) if matches!(*e, TrieError::IncompleteDatabase(_)) && allow_missing_nodes => Ok(false),
Err(e) => Err(format!("TrieDB iteration error: {}", e)),
}
}
/// Execute given closure for all key and values starting with prefix.
pub fn for_key_values_with_prefix<F: FnMut(&[u8], &[u8])>(&self, prefix: &[u8], mut f: F) {
self.trie_iter_inner(&self.root, Some(prefix), |k, v| { f(k, v); true }, None)
let _ = self.trie_iter_inner(&self.root, Some(prefix), |k, v| {f(&k, &v); true}, None, None, false);
}
}