Child trie api changes BREAKING (#4857)

Co-Authored-By: thiolliere <gui.thiolliere@gmail.com>
This commit is contained in:
cheme
2020-04-20 15:21:22 +02:00
committed by GitHub
parent 7d9aa81bfc
commit 4ffcf98d8d
64 changed files with 1514 additions and 1655 deletions
+21
View File
@@ -5536,6 +5536,26 @@ dependencies = [
"rust-argon2",
]
[[package]]
name = "ref-cast"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "077f197a31bfe7e4169145f9eca08d32705c6c6126c139c26793acdf163ac3ef"
dependencies = [
"ref-cast-impl",
]
[[package]]
name = "ref-cast-impl"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c36eb52b69b87c9e3a07387f476c88fd0dba9a1713b38e56617ed66b45392c1f"
dependencies = [
"proc-macro2",
"quote 1.0.3",
"syn 1.0.17",
]
[[package]]
name = "regex"
version = "1.3.6"
@@ -7715,6 +7735,7 @@ name = "sp-storage"
version = "2.0.0-dev"
dependencies = [
"impl-serde 0.2.3",
"ref-cast",
"serde",
"sp-debug-derive",
"sp-std",
+1 -1
View File
@@ -134,7 +134,7 @@
"0x5f3e4907f716ac89b6347d15ececedca0b6a45321efae92aea15e0740ec7afe7": "0x00000000",
"0x5f3e4907f716ac89b6347d15ececedca9220e172bed316605f73f1ff7b4ade98e54094c2d5af8ae10b91e1288f4f59f2946d7738f2c509b7effd909e5e9ba0ad": "0x00"
},
"children": {}
"childrenDefault": {}
}
}
}
+6 -6
View File
@@ -170,7 +170,7 @@ fn panic_execution_with_foreign_code_gives_error() {
vec![0u8; 32]
}
],
children: map![],
children_default: map![],
});
let r = executor_call::<NeverNativeValue, fn() -> _>(
@@ -206,7 +206,7 @@ fn bad_extrinsic_with_native_equivalent_code_gives_error() {
vec![0u8; 32]
}
],
children: map![],
children_default: map![],
});
let r = executor_call::<NeverNativeValue, fn() -> _>(
@@ -240,7 +240,7 @@ fn successful_execution_with_native_equivalent_code_gives_ok() {
},
<frame_system::BlockHash<Runtime>>::hashed_key_for(0) => vec![0u8; 32]
],
children: map![],
children_default: map![],
});
let r = executor_call::<NeverNativeValue, fn() -> _>(
@@ -282,7 +282,7 @@ fn successful_execution_with_foreign_code_gives_ok() {
},
<frame_system::BlockHash<Runtime>>::hashed_key_for(0) => vec![0u8; 32]
],
children: map![],
children_default: map![],
});
let r = executor_call::<NeverNativeValue, fn() -> _>(
@@ -704,7 +704,7 @@ fn panic_execution_gives_error() {
},
<frame_system::BlockHash<Runtime>>::hashed_key_for(0) => vec![0u8; 32]
],
children: map![],
children_default: map![],
});
let r = executor_call::<NeverNativeValue, fn() -> _>(
@@ -738,7 +738,7 @@ fn successful_execution_gives_ok() {
},
<frame_system::BlockHash<Runtime>>::hashed_key_for(0) => vec![0u8; 32]
],
children: map![],
children_default: map![],
});
let r = executor_call::<NeverNativeValue, fn() -> _>(
+1 -1
View File
@@ -143,7 +143,7 @@ fn transaction_fee_is_correct_ultimate() {
},
<frame_system::BlockHash<Runtime>>::hashed_key_for(0) => vec![0u8; 32]
],
children: map![],
children_default: map![],
});
let tip = 1_000_000;
+6 -8
View File
@@ -26,7 +26,7 @@ use sp_state_machine::{
ChangesTrieState, ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieTransaction,
StorageCollection, ChildStorageCollection,
};
use sp_storage::{StorageData, StorageKey, ChildInfo};
use sp_storage::{StorageData, StorageKey, PrefixedStorageKey, ChildInfo};
use crate::{
blockchain::{
Backend as BlockchainBackend, well_known_cache_keys
@@ -280,6 +280,7 @@ impl<'a, State, Block> Iterator for KeyIterator<'a, State, Block> where
Some(StorageKey(next_key))
}
}
/// Provides acess to storage primitives
pub trait StorageProvider<Block: BlockT, B: Backend<Block>> {
/// Given a `BlockId` and a key, return the value under the key in that block.
@@ -310,8 +311,7 @@ pub trait StorageProvider<Block: BlockT, B: Backend<Block>> {
fn child_storage(
&self,
id: &BlockId<Block>,
storage_key: &StorageKey,
child_info: ChildInfo,
child_info: &ChildInfo,
key: &StorageKey
) -> sp_blockchain::Result<Option<StorageData>>;
@@ -319,8 +319,7 @@ pub trait StorageProvider<Block: BlockT, B: Backend<Block>> {
fn child_storage_keys(
&self,
id: &BlockId<Block>,
child_storage_key: &StorageKey,
child_info: ChildInfo,
child_info: &ChildInfo,
key_prefix: &StorageKey
) -> sp_blockchain::Result<Vec<StorageKey>>;
@@ -328,8 +327,7 @@ pub trait StorageProvider<Block: BlockT, B: Backend<Block>> {
fn child_storage_hash(
&self,
id: &BlockId<Block>,
storage_key: &StorageKey,
child_info: ChildInfo,
child_info: &ChildInfo,
key: &StorageKey
) -> sp_blockchain::Result<Option<Block::Hash>>;
@@ -351,7 +349,7 @@ pub trait StorageProvider<Block: BlockT, B: Backend<Block>> {
&self,
first: NumberFor<Block>,
last: BlockId<Block>,
storage_key: Option<&StorageKey>,
storage_key: Option<&PrefixedStorageKey>,
key: &StorageKey
) -> sp_blockchain::Result<Vec<(NumberFor<Block>, u32)>>;
}
+3 -8
View File
@@ -26,7 +26,7 @@ use sp_runtime::{
},
generic::BlockId
};
use sp_core::ChangesTrieConfigurationRange;
use sp_core::{ChangesTrieConfigurationRange, storage::PrefixedStorageKey};
use sp_state_machine::StorageProof;
use sp_blockchain::{
HeaderMetadata, well_known_cache_keys, HeaderBackend, Cache as BlockchainCache,
@@ -81,12 +81,7 @@ pub struct RemoteReadChildRequest<Header: HeaderT> {
/// Header of block at which read is performed.
pub header: Header,
/// Storage key for child.
pub storage_key: Vec<u8>,
/// Child trie source information.
pub child_info: Vec<u8>,
/// Child type, its required to resolve `child_info`
/// content and choose child implementation.
pub child_type: u32,
pub storage_key: PrefixedStorageKey,
/// Child storage key to read.
pub keys: Vec<Vec<u8>>,
/// Number of times to retry request. None means that default RETRY_COUNT is used.
@@ -110,7 +105,7 @@ pub struct RemoteChangesRequest<Header: HeaderT> {
/// Proofs for roots of ascendants of tries_roots.0 are provided by the remote node.
pub tries_roots: (Header::Number, Header::Hash, Vec<Header::Hash>),
/// Optional Child Storage key to read.
pub storage_key: Option<Vec<u8>>,
pub storage_key: Option<PrefixedStorageKey>,
/// Storage key to read.
pub key: Vec<u8>,
/// Number of times to retry request. None means that default RETRY_COUNT is used.
+3 -4
View File
@@ -19,7 +19,7 @@ use sp_runtime::{
traits::{Block as BlockT},
};
use crate::{StorageProof, ChangesProof};
use sp_storage::{ChildInfo, StorageKey};
use sp_storage::{ChildInfo, StorageKey, PrefixedStorageKey};
/// Interface for providing block proving utilities.
pub trait ProofProvider<Block: BlockT> {
@@ -35,8 +35,7 @@ pub trait ProofProvider<Block: BlockT> {
fn read_child_proof(
&self,
id: &BlockId<Block>,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
keys: &mut dyn Iterator<Item=&[u8]>,
) -> sp_blockchain::Result<StorageProof>;
@@ -65,7 +64,7 @@ pub trait ProofProvider<Block: BlockT> {
last: Block::Hash,
min: Block::Hash,
max: Block::Hash,
storage_key: Option<&StorageKey>,
storage_key: Option<&PrefixedStorageKey>,
key: &StorageKey,
) -> sp_blockchain::Result<ChangesProof<Block::Header>>;
}
+14 -33
View File
@@ -74,17 +74,14 @@ impl<G: RuntimeGenesis, E> BuildStorage for ChainSpec<G, E> {
fn build_storage(&self) -> Result<Storage, String> {
match self.genesis.resolve()? {
Genesis::Runtime(gc) => gc.build_storage(),
Genesis::Raw(RawGenesis { top: map, children: children_map }) => Ok(Storage {
Genesis::Raw(RawGenesis { top: map, children_default: children_map }) => Ok(Storage {
top: map.into_iter().map(|(k, v)| (k.0, v.0)).collect(),
children: children_map.into_iter().map(|(sk, child_content)| {
let child_info = ChildInfo::resolve_child_info(
child_content.child_type,
child_content.child_info.as_slice(),
).expect("chain spec contains correct content").to_owned();
children_default: children_map.into_iter().map(|(storage_key, child_content)| {
let child_info = ChildInfo::new_default(storage_key.0.as_slice());
(
sk.0,
storage_key.0,
StorageChild {
data: child_content.data.into_iter().map(|(k, v)| (k.0, v.0)).collect(),
data: child_content.into_iter().map(|(k, v)| (k.0, v.0)).collect(),
child_info,
},
)
@@ -103,22 +100,13 @@ impl<G: RuntimeGenesis, E> BuildStorage for ChainSpec<G, E> {
type GenesisStorage = HashMap<StorageKey, StorageData>;
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
#[serde(deny_unknown_fields)]
struct ChildRawStorage {
data: GenesisStorage,
child_info: Vec<u8>,
child_type: u32,
}
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
#[serde(deny_unknown_fields)]
/// Storage content for genesis block.
struct RawGenesis {
top: GenesisStorage,
children: HashMap<StorageKey, ChildRawStorage>,
children_default: HashMap<StorageKey, GenesisStorage>,
}
#[derive(Serialize, Deserialize)]
@@ -291,23 +279,16 @@ impl<G: RuntimeGenesis, E: serde::Serialize + Clone> ChainSpec<G, E> {
let top = storage.top.into_iter()
.map(|(k, v)| (StorageKey(k), StorageData(v)))
.collect();
let children = storage.children.into_iter()
.map(|(sk, child)| {
let info = child.child_info.as_ref();
let (info, ci_type) = info.info();
(
StorageKey(sk),
ChildRawStorage {
data: child.data.into_iter()
.map(|(k, v)| (StorageKey(k), StorageData(v)))
.collect(),
child_info: info.to_vec(),
child_type: ci_type,
},
)})
let children_default = storage.children_default.into_iter()
.map(|(sk, child)| (
StorageKey(sk),
child.data.into_iter()
.map(|(k, v)| (StorageKey(k), StorageData(v)))
.collect(),
))
.collect();
Genesis::Raw(RawGenesis { top, children })
Genesis::Raw(RawGenesis { top, children_default })
},
(_, genesis) => genesis,
};
+16 -24
View File
@@ -77,10 +77,9 @@ impl<B: BlockT> BenchmarkingState<B> {
};
state.reopen()?;
let child_delta = genesis.children.into_iter().map(|(storage_key, child_content)| (
storage_key,
let child_delta = genesis.children_default.into_iter().map(|(_storage_key, child_content)| (
child_content.child_info,
child_content.data.into_iter().map(|(k, v)| (k, Some(v))),
child_content.child_info
));
let (root, transaction): (B::Hash, _) = state.state.borrow_mut().as_mut().unwrap().full_storage_root(
genesis.top.into_iter().map(|(k, v)| (k, Some(v))),
@@ -129,11 +128,10 @@ impl<B: BlockT> StateBackend<HashFor<B>> for BenchmarkingState<B> {
fn child_storage(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<Vec<u8>>, Self::Error> {
self.state.borrow().as_ref().ok_or_else(state_err)?.child_storage(storage_key, child_info, key)
self.state.borrow().as_ref().ok_or_else(state_err)?.child_storage(child_info, key)
}
fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
@@ -142,11 +140,10 @@ impl<B: BlockT> StateBackend<HashFor<B>> for BenchmarkingState<B> {
fn exists_child_storage(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<bool, Self::Error> {
self.state.borrow().as_ref().ok_or_else(state_err)?.exists_child_storage(storage_key, child_info, key)
self.state.borrow().as_ref().ok_or_else(state_err)?.exists_child_storage(child_info, key)
}
fn next_storage_key(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
@@ -155,11 +152,10 @@ impl<B: BlockT> StateBackend<HashFor<B>> for BenchmarkingState<B> {
fn next_child_storage_key(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<Vec<u8>>, Self::Error> {
self.state.borrow().as_ref().ok_or_else(state_err)?.next_child_storage_key(storage_key, child_info, key)
self.state.borrow().as_ref().ok_or_else(state_err)?.next_child_storage_key(child_info, key)
}
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
@@ -176,24 +172,22 @@ impl<B: BlockT> StateBackend<HashFor<B>> for BenchmarkingState<B> {
fn for_keys_in_child_storage<F: FnMut(&[u8])>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
f: F,
) {
if let Some(ref state) = *self.state.borrow() {
state.for_keys_in_child_storage(storage_key, child_info, f)
state.for_keys_in_child_storage(child_info, f)
}
}
fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
prefix: &[u8],
f: F,
) {
if let Some(ref state) = *self.state.borrow() {
state.for_child_keys_with_prefix(storage_key, child_info, prefix, f)
state.for_child_keys_with_prefix(child_info, prefix, f)
}
}
@@ -205,13 +199,12 @@ impl<B: BlockT> StateBackend<HashFor<B>> for BenchmarkingState<B> {
fn child_storage_root<I>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
delta: I,
) -> (B::Hash, bool, Self::Transaction) where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
{
self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_storage_root(storage_key, child_info, delta))
self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_storage_root(child_info, delta))
}
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
@@ -224,11 +217,10 @@ impl<B: BlockT> StateBackend<HashFor<B>> for BenchmarkingState<B> {
fn child_keys(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
prefix: &[u8],
) -> Vec<Vec<u8>> {
self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_keys(storage_key, child_info, prefix))
self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_keys(child_info, prefix))
}
fn as_trie_backend(&mut self)
@@ -26,6 +26,7 @@ use sp_trie::MemoryDB;
use sc_client_api::backend::PrunableStateChangesTrieStorage;
use sp_blockchain::{well_known_cache_keys, Cache as BlockchainCache};
use sp_core::{ChangesTrieConfiguration, ChangesTrieConfigurationRange, convert_hash};
use sp_core::storage::PrefixedStorageKey;
use sp_database::Transaction;
use sp_runtime::traits::{
Block as BlockT, Header as HeaderT, HashFor, NumberFor, One, Zero, CheckedSub,
@@ -482,7 +483,7 @@ where
fn with_cached_changed_keys(
&self,
root: &Block::Hash,
functor: &mut dyn FnMut(&HashMap<Option<Vec<u8>>, HashSet<Vec<u8>>>),
functor: &mut dyn FnMut(&HashMap<Option<PrefixedStorageKey>, HashSet<Vec<u8>>>),
) -> bool {
self.build_cache.read().with_changed_keys(root, functor)
}
+20 -33
View File
@@ -159,11 +159,10 @@ impl<B: BlockT> StateBackend<HashFor<B>> for RefTrackingState<B> {
fn child_storage(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<Vec<u8>>, Self::Error> {
self.state.child_storage(storage_key, child_info, key)
self.state.child_storage(child_info, key)
}
fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
@@ -172,11 +171,10 @@ impl<B: BlockT> StateBackend<HashFor<B>> for RefTrackingState<B> {
fn exists_child_storage(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<bool, Self::Error> {
self.state.exists_child_storage(storage_key, child_info, key)
self.state.exists_child_storage(child_info, key)
}
fn next_storage_key(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
@@ -185,11 +183,10 @@ impl<B: BlockT> StateBackend<HashFor<B>> for RefTrackingState<B> {
fn next_child_storage_key(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<Vec<u8>>, Self::Error> {
self.state.next_child_storage_key(storage_key, child_info, key)
self.state.next_child_storage_key(child_info, key)
}
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
@@ -202,21 +199,19 @@ impl<B: BlockT> StateBackend<HashFor<B>> for RefTrackingState<B> {
fn for_keys_in_child_storage<F: FnMut(&[u8])>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
f: F,
) {
self.state.for_keys_in_child_storage(storage_key, child_info, f)
self.state.for_keys_in_child_storage(child_info, f)
}
fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
prefix: &[u8],
f: F,
) {
self.state.for_child_keys_with_prefix(storage_key, child_info, prefix, f)
self.state.for_child_keys_with_prefix(child_info, prefix, f)
}
fn storage_root<I>(&self, delta: I) -> (B::Hash, Self::Transaction)
@@ -228,14 +223,13 @@ impl<B: BlockT> StateBackend<HashFor<B>> for RefTrackingState<B> {
fn child_storage_root<I>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
delta: I,
) -> (B::Hash, bool, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
{
self.state.child_storage_root(storage_key, child_info, delta)
self.state.child_storage_root(child_info, delta)
}
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
@@ -248,11 +242,10 @@ impl<B: BlockT> StateBackend<HashFor<B>> for RefTrackingState<B> {
fn child_keys(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
prefix: &[u8],
) -> Vec<Vec<u8>> {
self.state.child_keys(storage_key, child_info, prefix)
self.state.child_keys(child_info, prefix)
}
fn as_trie_backend(&mut self)
@@ -631,16 +624,10 @@ impl<Block: BlockT> sc_client_api::backend::BlockImportOperation<Block> for Bloc
return Err(sp_blockchain::Error::GenesisInvalid.into());
}
for child_key in storage.children.keys() {
if !well_known_keys::is_child_storage_key(&child_key) {
return Err(sp_blockchain::Error::GenesisInvalid.into());
}
}
let child_delta = storage.children.into_iter().map(|(storage_key, child_content)| (
storage_key,
child_content.data.into_iter().map(|(k, v)| (k, Some(v))), child_content.child_info),
);
let child_delta = storage.children_default.into_iter().map(|(_storage_key, child_content)|(
child_content.child_info,
child_content.data.into_iter().map(|(k, v)| (k, Some(v))),
));
let mut changes_trie_config: Option<ChangesTrieConfiguration> = None;
let (root, transaction) = self.old_state.full_storage_root(
@@ -1808,7 +1795,7 @@ pub(crate) mod tests {
op.reset_storage(Storage {
top: storage.iter().cloned().collect(),
children: Default::default(),
children_default: Default::default(),
}).unwrap();
op.set_block_data(
header.clone(),
@@ -1894,7 +1881,7 @@ pub(crate) mod tests {
op.reset_storage(Storage {
top: storage.iter().cloned().collect(),
children: Default::default(),
children_default: Default::default(),
}).unwrap();
key = op.db_updates.insert(EMPTY_PREFIX, b"hello");
+29 -43
View File
@@ -542,11 +542,10 @@ impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>> for Cachin
fn child_storage(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<Vec<u8>>, Self::Error> {
let key = (storage_key.to_vec(), key.to_vec());
let key = (child_info.storage_key().to_vec(), key.to_vec());
let local_cache = self.cache.local_cache.upgradable_read();
if let Some(entry) = local_cache.child_storage.get(&key).cloned() {
trace!("Found in local cache: {:?}", key);
@@ -564,7 +563,7 @@ impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>> for Cachin
}
}
trace!("Cache miss: {:?}", key);
let value = self.state.child_storage(storage_key, child_info, &key.1[..])?;
let value = self.state.child_storage(child_info, &key.1[..])?;
// just pass it through the usage counter
let value = self.usage.tally_child_key_read(&key, value, false);
@@ -579,20 +578,18 @@ impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>> for Cachin
fn exists_child_storage(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<bool, Self::Error> {
self.state.exists_child_storage(storage_key, child_info, key)
self.state.exists_child_storage(child_info, key)
}
fn for_keys_in_child_storage<F: FnMut(&[u8])>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
f: F,
) {
self.state.for_keys_in_child_storage(storage_key, child_info, f)
self.state.for_keys_in_child_storage(child_info, f)
}
fn next_storage_key(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
@@ -601,11 +598,10 @@ impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>> for Cachin
fn next_child_storage_key(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<Vec<u8>>, Self::Error> {
self.state.next_child_storage_key(storage_key, child_info, key)
self.state.next_child_storage_key(child_info, key)
}
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
@@ -618,12 +614,11 @@ impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>> for Cachin
fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
prefix: &[u8],
f: F,
) {
self.state.for_child_keys_with_prefix(storage_key, child_info, prefix, f)
self.state.for_child_keys_with_prefix(child_info, prefix, f)
}
fn storage_root<I>(&self, delta: I) -> (B::Hash, Self::Transaction)
@@ -635,14 +630,13 @@ impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>> for Cachin
fn child_storage_root<I>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
delta: I,
) -> (B::Hash, bool, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
{
self.state.child_storage_root(storage_key, child_info, delta)
self.state.child_storage_root(child_info, delta)
}
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
@@ -655,11 +649,10 @@ impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>> for Cachin
fn child_keys(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
prefix: &[u8],
) -> Vec<Vec<u8>> {
self.state.child_keys(storage_key, child_info, prefix)
self.state.child_keys(child_info, prefix)
}
fn as_trie_backend(&mut self) -> Option<&TrieBackend<Self::TrieBackendStorage, HashFor<B>>> {
@@ -758,11 +751,10 @@ impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>> for Syncin
fn child_storage(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<Vec<u8>>, Self::Error> {
self.caching_state().child_storage(storage_key, child_info, key)
self.caching_state().child_storage(child_info, key)
}
fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
@@ -771,20 +763,18 @@ impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>> for Syncin
fn exists_child_storage(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<bool, Self::Error> {
self.caching_state().exists_child_storage(storage_key, child_info, key)
self.caching_state().exists_child_storage(child_info, key)
}
fn for_keys_in_child_storage<F: FnMut(&[u8])>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
f: F,
) {
self.caching_state().for_keys_in_child_storage(storage_key, child_info, f)
self.caching_state().for_keys_in_child_storage(child_info, f)
}
fn next_storage_key(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
@@ -793,11 +783,10 @@ impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>> for Syncin
fn next_child_storage_key(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<Vec<u8>>, Self::Error> {
self.caching_state().next_child_storage_key(storage_key, child_info, key)
self.caching_state().next_child_storage_key(child_info, key)
}
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
@@ -810,12 +799,11 @@ impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>> for Syncin
fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
prefix: &[u8],
f: F,
) {
self.caching_state().for_child_keys_with_prefix(storage_key, child_info, prefix, f)
self.caching_state().for_child_keys_with_prefix(child_info, prefix, f)
}
fn storage_root<I>(&self, delta: I) -> (B::Hash, Self::Transaction)
@@ -827,14 +815,13 @@ impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>> for Syncin
fn child_storage_root<I>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
delta: I,
) -> (B::Hash, bool, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
{
self.caching_state().child_storage_root(storage_key, child_info, delta)
self.caching_state().child_storage_root(child_info, delta)
}
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
@@ -847,11 +834,10 @@ impl<S: StateBackend<HashFor<B>>, B: BlockT> StateBackend<HashFor<B>> for Syncin
fn child_keys(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
prefix: &[u8],
) -> Vec<Vec<u8>> {
self.caching_state().child_keys(storage_key, child_info, prefix)
self.caching_state().child_keys(child_info, prefix)
}
fn as_trie_backend(&mut self) -> Option<&TrieBackend<Self::TrieBackendStorage, HashFor<B>>> {
@@ -186,7 +186,7 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) {
b"foo".to_vec() => b"bar".to_vec(),
b"baz".to_vec() => b"bar".to_vec()
],
children: map![],
children_default: map![],
});
assert_eq!(ext, expected);
}
@@ -220,7 +220,7 @@ fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) {
b"aab".to_vec() => b"2".to_vec(),
b"bbb".to_vec() => b"5".to_vec()
],
children: map![],
children_default: map![],
});
assert_eq!(expected, ext);
}
+32 -39
View File
@@ -30,7 +30,7 @@ use libp2p::core::{ConnectedPoint, connection::{ConnectionId, ListenerId}};
use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler};
use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters};
use sp_core::{
storage::{StorageKey, ChildInfo},
storage::{StorageKey, PrefixedStorageKey, ChildInfo, ChildType},
hexdisplay::HexDisplay
};
use sp_consensus::{
@@ -1522,37 +1522,28 @@ impl<B: BlockT, H: ExHashT> Protocol<B, H> {
trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})",
request.id, who, HexDisplay::from(&request.storage_key), keys_str(), request.block);
let proof = if let Some(child_info) = ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) {
match self.context_data.chain.read_child_proof(
&BlockId::Hash(request.block),
&request.storage_key,
child_info,
&mut request.keys.iter().map(AsRef::as_ref),
) {
Ok(proof) => proof,
Err(error) => {
trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}",
request.id,
who,
HexDisplay::from(&request.storage_key),
keys_str(),
request.block,
error
);
StorageProof::empty()
}
let prefixed_key = PrefixedStorageKey::new_ref(&request.storage_key);
let child_info = match ChildType::from_prefixed_key(prefixed_key) {
Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)),
None => Err("Invalid child storage key".into()),
};
let proof = match child_info.and_then(|child_info| self.context_data.chain.read_child_proof(
&BlockId::Hash(request.block),
&child_info,
&mut request.keys.iter().map(AsRef::as_ref),
)) {
Ok(proof) => proof,
Err(error) => {
trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}",
request.id,
who,
HexDisplay::from(&request.storage_key),
keys_str(),
request.block,
error
);
StorageProof::empty()
}
} else {
trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}",
request.id,
who,
HexDisplay::from(&request.storage_key),
keys_str(),
request.block,
"invalid child info and type",
);
StorageProof::empty()
};
self.send_message(
&who,
@@ -1610,14 +1601,16 @@ impl<B: BlockT, H: ExHashT> Protocol<B, H> {
request.first,
request.last
);
let storage_key = request.storage_key.map(|sk| StorageKey(sk));
let key = StorageKey(request.key);
let prefixed_key = request.storage_key.as_ref()
.map(|storage_key| PrefixedStorageKey::new_ref(storage_key));
let (first, last, min, max) = (request.first, request.last, request.min, request.max);
let proof = match self.context_data.chain.key_changes_proof(
request.first,
request.last,
request.min,
request.max,
storage_key.as_ref(),
first,
last,
min,
max,
prefixed_key,
&key,
) {
Ok(proof) => proof,
@@ -1625,8 +1618,8 @@ impl<B: BlockT, H: ExHashT> Protocol<B, H> {
trace!(target: "sync", "Remote changes proof request {} from {} for key {} ({}..{}) failed with: {}",
request.id,
who,
if let Some(sk) = storage_key {
format!("{} : {}", HexDisplay::from(&sk.0), HexDisplay::from(&key.0))
if let Some(sk) = request.storage_key.as_ref() {
format!("{} : {}", HexDisplay::from(sk), HexDisplay::from(&key.0))
} else {
HexDisplay::from(&key.0).to_string()
},
@@ -58,7 +58,7 @@ use sc_client::light::fetcher;
use sc_client_api::StorageProof;
use sc_peerset::ReputationChange;
use sp_core::{
storage::{ChildInfo, StorageKey},
storage::{ChildInfo, ChildType,StorageKey, PrefixedStorageKey},
hexdisplay::HexDisplay,
};
use smallvec::SmallVec;
@@ -617,35 +617,27 @@ where
let block = Decode::decode(&mut request.block.as_ref())?;
let proof =
if let Some(info) = ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) {
match self.chain.read_child_proof(
&BlockId::Hash(block),
&request.storage_key,
info,
&mut request.keys.iter().map(AsRef::as_ref)
) {
Ok(proof) => proof,
Err(error) => {
log::trace!("remote read child request from {} ({} {} at {:?}) failed with: {}",
peer,
HexDisplay::from(&request.storage_key),
fmt_keys(request.keys.first(), request.keys.last()),
request.block,
error);
StorageProof::empty()
}
}
} else {
let prefixed_key = PrefixedStorageKey::new_ref(&request.storage_key);
let child_info = match ChildType::from_prefixed_key(prefixed_key) {
Some((ChildType::ParentKeyId, storage_key)) => Ok(ChildInfo::new_default(storage_key)),
None => Err("Invalid child storage key".into()),
};
let proof = match child_info.and_then(|child_info| self.chain.read_child_proof(
&BlockId::Hash(block),
&child_info,
&mut request.keys.iter().map(AsRef::as_ref)
)) {
Ok(proof) => proof,
Err(error) => {
log::trace!("remote read child request from {} ({} {} at {:?}) failed with: {}",
peer,
HexDisplay::from(&request.storage_key),
fmt_keys(request.keys.first(), request.keys.last()),
request.block,
"invalid child info and type"
);
error);
StorageProof::empty()
};
}
};
let response = {
let r = api::v1::light::RemoteReadResponse { proof: proof.encode() };
@@ -704,23 +696,18 @@ where
let min = Decode::decode(&mut request.min.as_ref())?;
let max = Decode::decode(&mut request.max.as_ref())?;
let key = StorageKey(request.key.clone());
let storage_key =
if request.storage_key.is_empty() {
None
} else {
Some(StorageKey(request.storage_key.clone()))
};
let storage_key = if request.storage_key.is_empty() {
None
} else {
Some(PrefixedStorageKey::new_ref(&request.storage_key))
};
let proof = match self.chain.key_changes_proof(first, last, min, max, storage_key.as_ref(), &key) {
let proof = match self.chain.key_changes_proof(first, last, min, max, storage_key, &key) {
Ok(proof) => proof,
Err(error) => {
log::trace!("remote changes proof request from {} for key {} ({:?}..{:?}) failed with: {}",
peer,
if let Some(sk) = storage_key {
format!("{} : {}", HexDisplay::from(&sk.0), HexDisplay::from(&key.0))
} else {
HexDisplay::from(&key.0).to_string()
},
format!("{} : {}", HexDisplay::from(&request.storage_key), HexDisplay::from(&key.0)),
request.first,
request.last,
error);
@@ -1092,9 +1079,7 @@ fn serialize_request<B: Block>(request: &Request<B>) -> Result<Vec<u8>, prost::E
Request::ReadChild { request, .. } => {
let r = api::v1::light::RemoteReadChildRequest {
block: request.block.encode(),
storage_key: request.storage_key.clone(),
child_type: request.child_type.clone(),
child_info: request.child_info.clone(),
storage_key: request.storage_key.clone().into_inner(),
keys: request.keys.clone(),
};
api::v1::light::request::Request::RemoteReadChildRequest(r)
@@ -1113,7 +1098,8 @@ fn serialize_request<B: Block>(request: &Request<B>) -> Result<Vec<u8>, prost::E
last: request.last_block.1.encode(),
min: request.tries_roots.1.encode(),
max: request.max_block.1.encode(),
storage_key: request.storage_key.clone().unwrap_or_default(),
storage_key: request.storage_key.clone().map(|s| s.into_inner())
.unwrap_or_default(),
key: request.key.clone(),
};
api::v1::light::request::Request::RemoteChangesRequest(r)
@@ -1343,8 +1329,6 @@ mod tests {
use super::{Event, LightClientHandler, Request, Response, OutboundProtocol, PeerStatus};
use void::Void;
const CHILD_INFO: ChildInfo<'static> = ChildInfo::new_default(b"foobarbaz");
type Block = sp_runtime::generic::Block<Header<u64, BlakeTwo256>, substrate_test_runtime::Extrinsic>;
type Handler = LightClientHandler<Block>;
type Swarm = libp2p::swarm::Swarm<Handler>;
@@ -1894,15 +1878,13 @@ mod tests {
#[test]
fn receives_remote_read_child_response() {
let info = CHILD_INFO.info();
let mut chan = oneshot::channel();
let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]);
let request = fetcher::RemoteReadChildRequest {
header: dummy_header(),
block: Default::default(),
storage_key: b":child_storage:sub".to_vec(),
storage_key: child_info.prefixed_storage_key(),
keys: vec![b":key".to_vec()],
child_info: info.0.to_vec(),
child_type: info.1,
retry_count: None,
};
issue_request(Request::ReadChild { request, sender: chan.0 });
@@ -1997,15 +1979,13 @@ mod tests {
#[test]
fn send_receive_read_child() {
let info = CHILD_INFO.info();
let chan = oneshot::channel();
let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]);
let request = fetcher::RemoteReadChildRequest {
header: dummy_header(),
block: Default::default(),
storage_key: b":child_storage:sub".to_vec(),
storage_key: child_info.prefixed_storage_key(),
keys: vec![b":key".to_vec()],
child_info: info.0.to_vec(),
child_type: info.1,
retry_count: None,
};
send_receive(Request::ReadChild { request, sender: chan.0 });
@@ -477,11 +477,6 @@ pub mod generic {
pub block: H,
/// Child Storage key.
pub storage_key: Vec<u8>,
/// Child trie source information.
pub child_info: Vec<u8>,
/// Child type, its required to resolve `child_info`
/// content and choose child implementation.
pub child_type: u32,
/// Storage key.
pub keys: Vec<Vec<u8>>,
}
@@ -67,13 +67,9 @@ message RemoteReadResponse {
message RemoteReadChildRequest {
// Block at which to perform call.
bytes block = 2;
// Child Storage key.
// Child Storage key, this is relative
// to the child type storage location.
bytes storage_key = 3;
// Child trie source information.
bytes child_info = 4;
/// Child type, its required to resolve `child_info`
/// content and choose child implementation.
uint32 child_type = 5;
// Storage keys.
repeated bytes keys = 6;
}
@@ -0,0 +1,69 @@
// Copyright 2017-2020 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! Substrate state API.
use jsonrpc_derive::rpc;
use sp_core::storage::{StorageKey, PrefixedStorageKey, StorageData};
use crate::state::error::FutureResult;
pub use self::gen_client::Client as ChildStateClient;
/// Substrate child state API.
///
/// Note that all `PrefixedStorageKey` values are deserialized
/// from JSON and are not guaranteed to be valid child trie keys;
/// implementations must validate them before use.
#[rpc]
pub trait ChildStateApi<Hash> {
/// RPC Metadata
type Metadata;
/// Returns the keys with prefix from a child storage; leave the prefix empty to get all the keys.
#[rpc(name = "childstate_getKeys")]
fn storage_keys(
&self,
child_storage_key: PrefixedStorageKey,
prefix: StorageKey,
hash: Option<Hash>
) -> FutureResult<Vec<StorageKey>>;
/// Returns a child storage entry at a specific block's state.
#[rpc(name = "childstate_getStorage")]
fn storage(
&self,
child_storage_key: PrefixedStorageKey,
key: StorageKey,
hash: Option<Hash>
) -> FutureResult<Option<StorageData>>;
/// Returns the hash of a child storage entry at a block's state.
#[rpc(name = "childstate_getStorageHash")]
fn storage_hash(
&self,
child_storage_key: PrefixedStorageKey,
key: StorageKey,
hash: Option<Hash>
) -> FutureResult<Option<Hash>>;
/// Returns the size (in bytes) of a child storage entry at a block's state.
#[rpc(name = "childstate_getStorageSize")]
fn storage_size(
&self,
child_storage_key: PrefixedStorageKey,
key: StorageKey,
hash: Option<Hash>
) -> FutureResult<Option<u64>>;
}
+1
View File
@@ -34,4 +34,5 @@ pub mod author;
pub mod chain;
pub mod offchain;
pub mod state;
pub mod child_state;
pub mod system;
-44
View File
@@ -72,50 +72,6 @@ pub trait StateApi<Hash> {
#[rpc(name = "state_getStorageSize", alias("state_getStorageSizeAt"))]
fn storage_size(&self, key: StorageKey, hash: Option<Hash>) -> FutureResult<Option<u64>>;
/// Returns the keys with prefix from a child storage, leave empty to get all the keys
#[rpc(name = "state_getChildKeys")]
fn child_storage_keys(
&self,
child_storage_key: StorageKey,
child_info: StorageKey,
child_type: u32,
prefix: StorageKey,
hash: Option<Hash>
) -> FutureResult<Vec<StorageKey>>;
/// Returns a child storage entry at a specific block's state.
#[rpc(name = "state_getChildStorage")]
fn child_storage(
&self,
child_storage_key: StorageKey,
child_info: StorageKey,
child_type: u32,
key: StorageKey,
hash: Option<Hash>
) -> FutureResult<Option<StorageData>>;
/// Returns the hash of a child storage entry at a block's state.
#[rpc(name = "state_getChildStorageHash")]
fn child_storage_hash(
&self,
child_storage_key: StorageKey,
child_info: StorageKey,
child_type: u32,
key: StorageKey,
hash: Option<Hash>
) -> FutureResult<Option<Hash>>;
/// Returns the size of a child storage entry at a block's state.
#[rpc(name = "state_getChildStorageSize")]
fn child_storage_size(
&self,
child_storage_key: StorageKey,
child_info: StorageKey,
child_type: u32,
key: StorageKey,
hash: Option<Hash>
) -> FutureResult<Option<u64>>;
/// Returns the runtime metadata as an opaque blob.
#[rpc(name = "state_getMetadata")]
fn metadata(&self, hash: Option<Hash>) -> FutureResult<Bytes>;
+111 -103
View File
@@ -28,7 +28,7 @@ use rpc::{Result as RpcResult, futures::{Future, future::result}};
use sc_rpc_api::Subscriptions;
use sc_client::{light::{blockchain::RemoteBlockchain, fetcher::Fetcher}};
use sp_core::{Bytes, storage::{StorageKey, StorageData, StorageChangeSet}};
use sp_core::{Bytes, storage::{StorageKey, PrefixedStorageKey, StorageData, StorageChangeSet}};
use sp_version::RuntimeVersion;
use sp_runtime::traits::Block as BlockT;
@@ -37,6 +37,7 @@ use sp_api::{Metadata, ProvideRuntimeApi, CallApiAt};
use self::error::{Error, FutureResult};
pub use sc_rpc_api::state::*;
pub use sc_rpc_api::child_state::*;
use sc_client_api::{ExecutorProvider, StorageProvider, BlockchainEvents, Backend};
use sp_blockchain::{HeaderMetadata, HeaderBackend};
@@ -103,49 +104,6 @@ pub trait StateBackend<Block: BlockT, Client>: Send + Sync + 'static
.map(|x| x.map(|x| x.0.len() as u64)))
}
/// Returns the keys with prefix from a child storage, leave empty to get all the keys
fn child_storage_keys(
&self,
block: Option<Block::Hash>,
child_storage_key: StorageKey,
child_info: StorageKey,
child_type: u32,
prefix: StorageKey,
) -> FutureResult<Vec<StorageKey>>;
/// Returns a child storage entry at a specific block's state.
fn child_storage(
&self,
block: Option<Block::Hash>,
child_storage_key: StorageKey,
child_info: StorageKey,
child_type: u32,
key: StorageKey,
) -> FutureResult<Option<StorageData>>;
/// Returns the hash of a child storage entry at a block's state.
fn child_storage_hash(
&self,
block: Option<Block::Hash>,
child_storage_key: StorageKey,
child_info: StorageKey,
child_type: u32,
key: StorageKey,
) -> FutureResult<Option<Block::Hash>>;
/// Returns the size of a child storage entry at a block's state.
fn child_storage_size(
&self,
block: Option<Block::Hash>,
child_storage_key: StorageKey,
child_info: StorageKey,
child_type: u32,
key: StorageKey,
) -> FutureResult<Option<u64>> {
Box::new(self.child_storage(block, child_storage_key, child_info, child_type, key)
.map(|x| x.map(|x| x.0.len() as u64)))
}
/// Returns the runtime metadata as an opaque blob.
fn metadata(&self, block: Option<Block::Hash>) -> FutureResult<Bytes>;
@@ -204,7 +162,7 @@ pub trait StateBackend<Block: BlockT, Client>: Send + Sync + 'static
pub fn new_full<BE, Block: BlockT, Client>(
client: Arc<Client>,
subscriptions: Subscriptions,
) -> State<Block, Client>
) -> (State<Block, Client>, ChildState<Block, Client>)
where
Block: BlockT + 'static,
BE: Backend<Block> + 'static,
@@ -214,9 +172,11 @@ pub fn new_full<BE, Block: BlockT, Client>(
+ ProvideRuntimeApi<Block> + Send + Sync + 'static,
Client::Api: Metadata<Block, Error = sp_blockchain::Error>,
{
State {
backend: Box::new(self::state_full::FullState::new(client, subscriptions)),
}
let child_backend = Box::new(
self::state_full::FullState::new(client.clone(), subscriptions.clone())
);
let backend = Box::new(self::state_full::FullState::new(client, subscriptions));
(State { backend }, ChildState { backend: child_backend })
}
/// Create new state API that works on light node.
@@ -225,7 +185,7 @@ pub fn new_light<BE, Block: BlockT, Client, F: Fetcher<Block>>(
subscriptions: Subscriptions,
remote_blockchain: Arc<dyn RemoteBlockchain<Block>>,
fetcher: Arc<F>,
) -> State<Block, Client>
) -> (State<Block, Client>, ChildState<Block, Client>)
where
Block: BlockT + 'static,
BE: Backend<Block> + 'static,
@@ -235,14 +195,20 @@ pub fn new_light<BE, Block: BlockT, Client, F: Fetcher<Block>>(
+ Send + Sync + 'static,
F: Send + Sync + 'static,
{
State {
backend: Box::new(self::state_light::LightState::new(
let child_backend = Box::new(self::state_light::LightState::new(
client.clone(),
subscriptions.clone(),
remote_blockchain.clone(),
fetcher.clone(),
));
let backend = Box::new(self::state_light::LightState::new(
client,
subscriptions,
remote_blockchain,
fetcher,
)),
}
));
(State { backend }, ChildState { backend: child_backend })
}
/// State API with subscriptions support.
@@ -307,50 +273,6 @@ impl<Block, Client> StateApi<Block::Hash> for State<Block, Client>
self.backend.storage_size(block, key)
}
fn child_storage(
&self,
child_storage_key: StorageKey,
child_info: StorageKey,
child_type: u32,
key: StorageKey,
block: Option<Block::Hash>
) -> FutureResult<Option<StorageData>> {
self.backend.child_storage(block, child_storage_key, child_info, child_type, key)
}
fn child_storage_keys(
&self,
child_storage_key: StorageKey,
child_info: StorageKey,
child_type: u32,
key_prefix: StorageKey,
block: Option<Block::Hash>
) -> FutureResult<Vec<StorageKey>> {
self.backend.child_storage_keys(block, child_storage_key, child_info, child_type, key_prefix)
}
fn child_storage_hash(
&self,
child_storage_key: StorageKey,
child_info: StorageKey,
child_type: u32,
key: StorageKey,
block: Option<Block::Hash>
) -> FutureResult<Option<Block::Hash>> {
self.backend.child_storage_hash(block, child_storage_key, child_info, child_type, key)
}
fn child_storage_size(
&self,
child_storage_key: StorageKey,
child_info: StorageKey,
child_type: u32,
key: StorageKey,
block: Option<Block::Hash>
) -> FutureResult<Option<u64>> {
self.backend.child_storage_size(block, child_storage_key, child_info, child_type, key)
}
fn metadata(&self, block: Option<Block::Hash>) -> FutureResult<Bytes> {
self.backend.metadata(block)
}
@@ -402,12 +324,98 @@ impl<Block, Client> StateApi<Block::Hash> for State<Block, Client>
}
}
/// Child state backend API.
pub trait ChildStateBackend<Block: BlockT, Client>: Send + Sync + 'static
where
Block: BlockT + 'static,
Client: Send + Sync + 'static,
{
/// Returns the keys with prefix from a child storage,
/// leave prefix empty to get all the keys.
fn storage_keys(
&self,
block: Option<Block::Hash>,
storage_key: PrefixedStorageKey,
prefix: StorageKey,
) -> FutureResult<Vec<StorageKey>>;
/// Returns a child storage entry at a specific block's state.
fn storage(
&self,
block: Option<Block::Hash>,
storage_key: PrefixedStorageKey,
key: StorageKey,
) -> FutureResult<Option<StorageData>>;
/// Returns the hash of a child storage entry at a block's state.
fn storage_hash(
&self,
block: Option<Block::Hash>,
storage_key: PrefixedStorageKey,
key: StorageKey,
) -> FutureResult<Option<Block::Hash>>;
/// Returns the size of a child storage entry at a block's state.
fn storage_size(
&self,
block: Option<Block::Hash>,
storage_key: PrefixedStorageKey,
key: StorageKey,
) -> FutureResult<Option<u64>> {
Box::new(self.storage(block, storage_key, key)
.map(|x| x.map(|x| x.0.len() as u64)))
}
}
/// Child state API with subscriptions support.
pub struct ChildState<Block, Client> {
backend: Box<dyn ChildStateBackend<Block, Client>>,
}
impl<Block, Client> ChildStateApi<Block::Hash> for ChildState<Block, Client>
where
Block: BlockT + 'static,
Client: Send + Sync + 'static,
{
type Metadata = crate::metadata::Metadata;
fn storage(
&self,
storage_key: PrefixedStorageKey,
key: StorageKey,
block: Option<Block::Hash>
) -> FutureResult<Option<StorageData>> {
self.backend.storage(block, storage_key, key)
}
fn storage_keys(
&self,
storage_key: PrefixedStorageKey,
key_prefix: StorageKey,
block: Option<Block::Hash>
) -> FutureResult<Vec<StorageKey>> {
self.backend.storage_keys(block, storage_key, key_prefix)
}
fn storage_hash(
&self,
storage_key: PrefixedStorageKey,
key: StorageKey,
block: Option<Block::Hash>
) -> FutureResult<Option<Block::Hash>> {
self.backend.storage_hash(block, storage_key, key)
}
fn storage_size(
&self,
storage_key: PrefixedStorageKey,
key: StorageKey,
block: Option<Block::Hash>
) -> FutureResult<Option<u64>> {
self.backend.storage_size(block, storage_key, key)
}
}
fn client_err(err: sp_blockchain::Error) -> Error {
Error::Client(Box::new(err))
}
const CHILD_RESOLUTION_ERROR: &str = "Unexpected child info and type";
fn child_resolution_error() -> sp_blockchain::Error {
sp_blockchain::Error::Msg(CHILD_RESOLUTION_ERROR.to_string())
}
+80 -63
View File
@@ -29,7 +29,8 @@ use sc_client_api::backend::Backend;
use sp_blockchain::{Result as ClientResult, Error as ClientError, HeaderMetadata, CachedHeaderMetadata, HeaderBackend};
use sc_client::BlockchainEvents;
use sp_core::{
Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, ChildInfo},
Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet,
ChildInfo, ChildType, PrefixedStorageKey},
};
use sp_version::RuntimeVersion;
use sp_runtime::{
@@ -38,7 +39,7 @@ use sp_runtime::{
use sp_api::{Metadata, ProvideRuntimeApi, CallApiAt};
use super::{StateBackend, error::{FutureResult, Error, Result}, client_err, child_resolution_error};
use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error, Result}, client_err};
use std::marker::PhantomData;
use sc_client_api::{CallExecutor, StorageProvider, ExecutorProvider};
@@ -308,66 +309,6 @@ impl<BE, Block, Client> StateBackend<Block, Client> for FullState<BE, Block, Cli
.map_err(client_err)))
}
fn child_storage_keys(
&self,
block: Option<Block::Hash>,
child_storage_key: StorageKey,
child_info: StorageKey,
child_type: u32,
prefix: StorageKey,
) -> FutureResult<Vec<StorageKey>> {
Box::new(result(
self.block_or_best(block)
.and_then(|block| self.client.child_storage_keys(
&BlockId::Hash(block),
&child_storage_key,
ChildInfo::resolve_child_info(child_type, &child_info.0[..])
.ok_or_else(child_resolution_error)?,
&prefix,
))
.map_err(client_err)))
}
fn child_storage(
&self,
block: Option<Block::Hash>,
child_storage_key: StorageKey,
child_info: StorageKey,
child_type: u32,
key: StorageKey,
) -> FutureResult<Option<StorageData>> {
Box::new(result(
self.block_or_best(block)
.and_then(|block| self.client.child_storage(
&BlockId::Hash(block),
&child_storage_key,
ChildInfo::resolve_child_info(child_type, &child_info.0[..])
.ok_or_else(child_resolution_error)?,
&key,
))
.map_err(client_err)))
}
fn child_storage_hash(
&self,
block: Option<Block::Hash>,
child_storage_key: StorageKey,
child_info: StorageKey,
child_type: u32,
key: StorageKey,
) -> FutureResult<Option<Block::Hash>> {
Box::new(result(
self.block_or_best(block)
.and_then(|block| self.client.child_storage_hash(
&BlockId::Hash(block),
&child_storage_key,
ChildInfo::resolve_child_info(child_type, &child_info.0[..])
.ok_or_else(child_resolution_error)?,
&key,
))
.map_err(client_err)))
}
fn metadata(&self, block: Option<Block::Hash>) -> FutureResult<Bytes> {
Box::new(result(
self.block_or_best(block)
@@ -493,7 +434,7 @@ impl<BE, Block, Client> StateBackend<Block, Client> for FullState<BE, Block, Cli
let block = self.client.info().best_hash;
let changes = keys
.into_iter()
.map(|key| self.storage(Some(block.clone()).into(), key.clone())
.map(|key| StateBackend::storage(self, Some(block.clone()).into(), key.clone())
.map(|val| (key.clone(), val))
.wait()
.unwrap_or_else(|_| (key, None))
@@ -530,6 +471,82 @@ impl<BE, Block, Client> StateBackend<Block, Client> for FullState<BE, Block, Cli
}
}
impl<BE, Block, Client> ChildStateBackend<Block, Client> for FullState<BE, Block, Client> where
Block: BlockT + 'static,
BE: Backend<Block> + 'static,
Client: ExecutorProvider<Block> + StorageProvider<Block, BE> + HeaderBackend<Block>
+ HeaderMetadata<Block, Error = sp_blockchain::Error> + BlockchainEvents<Block>
+ CallApiAt<Block, Error = sp_blockchain::Error> + ProvideRuntimeApi<Block>
+ Send + Sync + 'static,
Client::Api: Metadata<Block, Error = sp_blockchain::Error>,
{
fn storage_keys(
&self,
block: Option<Block::Hash>,
storage_key: PrefixedStorageKey,
prefix: StorageKey,
) -> FutureResult<Vec<StorageKey>> {
Box::new(result(
self.block_or_best(block)
.and_then(|block| {
let child_info = match ChildType::from_prefixed_key(&storage_key) {
Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key),
None => return Err("Invalid child storage key".into()),
};
self.client.child_storage_keys(
&BlockId::Hash(block),
&child_info,
&prefix,
)
})
.map_err(client_err)))
}
fn storage(
&self,
block: Option<Block::Hash>,
storage_key: PrefixedStorageKey,
key: StorageKey,
) -> FutureResult<Option<StorageData>> {
Box::new(result(
self.block_or_best(block)
.and_then(|block| {
let child_info = match ChildType::from_prefixed_key(&storage_key) {
Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key),
None => return Err("Invalid child storage key".into()),
};
self.client.child_storage(
&BlockId::Hash(block),
&child_info,
&key,
)
})
.map_err(client_err)))
}
fn storage_hash(
&self,
block: Option<Block::Hash>,
storage_key: PrefixedStorageKey,
key: StorageKey,
) -> FutureResult<Option<Block::Hash>> {
Box::new(result(
self.block_or_best(block)
.and_then(|block| {
let child_info = match ChildType::from_prefixed_key(&storage_key) {
Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key),
None => return Err("Invalid child storage key".into()),
};
self.client.child_storage_hash(
&BlockId::Hash(block),
&child_info,
&key,
)
})
.map_err(client_err)))
}
}
/// Splits passed range into two subranges where:
/// - first range has at least one element in it;
/// - second range (optionally) starts at given `middle` element.
+64 -65
View File
@@ -48,17 +48,19 @@ use sc_client::{
},
};
use sp_core::{
Bytes, OpaqueMetadata, storage::{StorageKey, StorageData, StorageChangeSet},
Bytes, OpaqueMetadata,
storage::{StorageKey, PrefixedStorageKey, StorageData, StorageChangeSet},
};
use sp_version::RuntimeVersion;
use sp_runtime::{generic::BlockId, traits::{Block as BlockT, HashFor}};
use super::{StateBackend, error::{FutureResult, Error}, client_err};
use super::{StateBackend, ChildStateBackend, error::{FutureResult, Error}, client_err};
/// Storage data map of storage keys => (optional) storage value.
type StorageMap = HashMap<StorageKey, Option<StorageData>>;
/// State API backend for light nodes.
#[derive(Clone)]
pub struct LightState<Block: BlockT, F: Fetcher<Block>, Client> {
client: Arc<Client>,
subscriptions: Subscriptions,
@@ -233,69 +235,7 @@ impl<Block, F, Client> StateBackend<Block, Client> for LightState<Block, F, Clie
block: Option<Block::Hash>,
key: StorageKey,
) -> FutureResult<Option<Block::Hash>> {
Box::new(self
.storage(block, key)
.and_then(|maybe_storage|
result(Ok(maybe_storage.map(|storage| HashFor::<Block>::hash(&storage.0))))
)
)
}
fn child_storage_keys(
&self,
_block: Option<Block::Hash>,
_child_storage_key: StorageKey,
_child_info: StorageKey,
_child_type: u32,
_prefix: StorageKey,
) -> FutureResult<Vec<StorageKey>> {
Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient))))
}
fn child_storage(
&self,
block: Option<Block::Hash>,
child_storage_key: StorageKey,
child_info: StorageKey,
child_type: u32,
key: StorageKey,
) -> FutureResult<Option<StorageData>> {
let block = self.block_or_best(block);
let fetcher = self.fetcher.clone();
let child_storage = resolve_header(&*self.remote_blockchain, &*self.fetcher, block)
.then(move |result| match result {
Ok(header) => Either::Left(fetcher.remote_read_child(RemoteReadChildRequest {
block,
header,
storage_key: child_storage_key.0,
child_info: child_info.0,
child_type,
keys: vec![key.0.clone()],
retry_count: Default::default(),
}).then(move |result| ready(result
.map(|mut data| data
.remove(&key.0)
.expect("successful result has entry for all keys; qed")
.map(StorageData)
)
.map_err(client_err)
))),
Err(error) => Either::Right(ready(Err(error))),
});
Box::new(child_storage.boxed().compat())
}
fn child_storage_hash(
&self,
block: Option<Block::Hash>,
child_storage_key: StorageKey,
child_info: StorageKey,
child_type: u32,
key: StorageKey,
) -> FutureResult<Option<Block::Hash>> {
Box::new(self
.child_storage(block, child_storage_key, child_info, child_type, key)
Box::new(StateBackend::storage(self, block, key)
.and_then(|maybe_storage|
result(Ok(maybe_storage.map(|storage| HashFor::<Block>::hash(&storage.0))))
)
@@ -518,6 +458,65 @@ impl<Block, F, Client> StateBackend<Block, Client> for LightState<Block, F, Clie
}
}
impl<Block, F, Client> ChildStateBackend<Block, Client> for LightState<Block, F, Client>
where
Block: BlockT,
Client: BlockchainEvents<Block> + HeaderBackend<Block> + Send + Sync + 'static,
F: Fetcher<Block> + 'static
{
fn storage_keys(
&self,
_block: Option<Block::Hash>,
_storage_key: PrefixedStorageKey,
_prefix: StorageKey,
) -> FutureResult<Vec<StorageKey>> {
Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient))))
}
fn storage(
&self,
block: Option<Block::Hash>,
storage_key: PrefixedStorageKey,
key: StorageKey,
) -> FutureResult<Option<StorageData>> {
let block = self.block_or_best(block);
let fetcher = self.fetcher.clone();
let child_storage = resolve_header(&*self.remote_blockchain, &*self.fetcher, block)
.then(move |result| match result {
Ok(header) => Either::Left(fetcher.remote_read_child(RemoteReadChildRequest {
block,
header,
storage_key,
keys: vec![key.0.clone()],
retry_count: Default::default(),
}).then(move |result| ready(result
.map(|mut data| data
.remove(&key.0)
.expect("successful result has entry for all keys; qed")
.map(StorageData)
)
.map_err(client_err)
))),
Err(error) => Either::Right(ready(Err(error))),
});
Box::new(child_storage.boxed().compat())
}
fn storage_hash(
&self,
block: Option<Block::Hash>,
storage_key: PrefixedStorageKey,
key: StorageKey,
) -> FutureResult<Option<Block::Hash>> {
Box::new(ChildStateBackend::storage(self, block, storage_key, key)
.and_then(|maybe_storage|
result(Ok(maybe_storage.map(|storage| HashFor::<Block>::hash(&storage.0))))
)
)
}
}
/// Resolve header by hash.
fn resolve_header<Block: BlockT, F: Fetcher<Block>>(
remote_blockchain: &dyn RemoteBlockchain<Block>,
+24 -31
View File
@@ -21,7 +21,7 @@ use self::error::Error;
use std::sync::Arc;
use assert_matches::assert_matches;
use futures01::stream::Stream;
use sp_core::{storage::{well_known_keys, ChildInfo}, ChangesTrieConfiguration};
use sp_core::{storage::ChildInfo, ChangesTrieConfiguration};
use sp_core::hash::H256;
use sc_block_builder::BlockBuilderProvider;
use sp_io::hashing::blake2_256;
@@ -32,26 +32,28 @@ use substrate_test_runtime_client::{
};
use sp_runtime::generic::BlockId;
const CHILD_INFO: ChildInfo<'static> = ChildInfo::new_default(b"unique_id");
const STORAGE_KEY: &[u8] = b"child";
fn prefixed_storage_key() -> PrefixedStorageKey {
let child_info = ChildInfo::new_default(&STORAGE_KEY[..]);
child_info.prefixed_storage_key()
}
#[test]
fn should_return_storage() {
const KEY: &[u8] = b":mock";
const VALUE: &[u8] = b"hello world";
const STORAGE_KEY: &[u8] = b":child_storage:default:child";
const CHILD_VALUE: &[u8] = b"hello world !";
let child_info = ChildInfo::new_default(STORAGE_KEY);
let mut core = tokio::runtime::Runtime::new().unwrap();
let client = TestClientBuilder::new()
.add_extra_storage(KEY.to_vec(), VALUE.to_vec())
.add_extra_child_storage(STORAGE_KEY.to_vec(), CHILD_INFO, KEY.to_vec(), CHILD_VALUE.to_vec())
.add_extra_child_storage(&child_info, KEY.to_vec(), CHILD_VALUE.to_vec())
.build();
let genesis_hash = client.genesis_hash();
let client = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor())));
let (client, child) = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor())));
let key = StorageKey(KEY.to_vec());
let storage_key = StorageKey(STORAGE_KEY.to_vec());
let (child_info, child_type) = CHILD_INFO.info();
let child_info = StorageKey(child_info.to_vec());
assert_eq!(
client.storage(key.clone(), Some(genesis_hash).into()).wait()
@@ -69,7 +71,7 @@ fn should_return_storage() {
);
assert_eq!(
core.block_on(
client.child_storage(storage_key, child_info, child_type, key, Some(genesis_hash).into())
child.storage(prefixed_storage_key(), key, Some(genesis_hash).into())
.map(|x| x.map(|x| x.0.len()))
).unwrap().unwrap() as usize,
CHILD_VALUE.len(),
@@ -79,45 +81,36 @@ fn should_return_storage() {
#[test]
fn should_return_child_storage() {
let (child_info, child_type) = CHILD_INFO.info();
let child_info = StorageKey(child_info.to_vec());
let child_info = ChildInfo::new_default(STORAGE_KEY);
let core = tokio::runtime::Runtime::new().unwrap();
let client = Arc::new(substrate_test_runtime_client::TestClientBuilder::new()
.add_child_storage("test", "key", CHILD_INFO, vec![42_u8])
.add_child_storage(&child_info, "key", vec![42_u8])
.build());
let genesis_hash = client.genesis_hash();
let client = new_full(client, Subscriptions::new(Arc::new(core.executor())));
let child_key = StorageKey(
well_known_keys::CHILD_STORAGE_KEY_PREFIX.iter().chain(b"test").cloned().collect()
);
let (_client, child) = new_full(client, Subscriptions::new(Arc::new(core.executor())));
let child_key = prefixed_storage_key();
let key = StorageKey(b"key".to_vec());
assert_matches!(
client.child_storage(
child.storage(
child_key.clone(),
child_info.clone(),
child_type,
key.clone(),
Some(genesis_hash).into(),
).wait(),
Ok(Some(StorageData(ref d))) if d[0] == 42 && d.len() == 1
);
assert_matches!(
client.child_storage_hash(
child.storage_hash(
child_key.clone(),
child_info.clone(),
child_type,
key.clone(),
Some(genesis_hash).into(),
).wait().map(|x| x.is_some()),
Ok(true)
);
assert_matches!(
client.child_storage_size(
child.storage_size(
child_key.clone(),
child_info.clone(),
child_type,
key.clone(),
None,
).wait(),
@@ -130,7 +123,7 @@ fn should_call_contract() {
let core = tokio::runtime::Runtime::new().unwrap();
let client = Arc::new(substrate_test_runtime_client::new());
let genesis_hash = client.genesis_hash();
let client = new_full(client, Subscriptions::new(Arc::new(core.executor())));
let (client, _child) = new_full(client, Subscriptions::new(Arc::new(core.executor())));
assert_matches!(
client.call("balanceOf".into(), Bytes(vec![1,2,3]), Some(genesis_hash).into()).wait(),
@@ -146,7 +139,7 @@ fn should_notify_about_storage_changes() {
{
let mut client = Arc::new(substrate_test_runtime_client::new());
let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote)));
let (api, _child) = new_full(client.clone(), Subscriptions::new(Arc::new(remote)));
api.subscribe_storage(Default::default(), subscriber, None.into());
@@ -179,7 +172,7 @@ fn should_send_initial_storage_changes_and_notifications() {
{
let mut client = Arc::new(substrate_test_runtime_client::new());
let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote)));
let (api, _child) = new_full(client.clone(), Subscriptions::new(Arc::new(remote)));
let alice_balance_key = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Alice.into()));
@@ -215,7 +208,7 @@ fn should_send_initial_storage_changes_and_notifications() {
fn should_query_storage() {
fn run_tests(mut client: Arc<TestClient>, has_changes_trie_config: bool) {
let core = tokio::runtime::Runtime::new().unwrap();
let api = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor())));
let (api, _child) = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor())));
let mut add_block = |nonce| {
let mut builder = client.new_block(Default::default()).unwrap();
@@ -434,7 +427,7 @@ fn should_return_runtime_version() {
let core = tokio::runtime::Runtime::new().unwrap();
let client = Arc::new(substrate_test_runtime_client::new());
let api = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor())));
let (api, _child) = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor())));
let result = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\
\"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",3],\
@@ -458,7 +451,7 @@ fn should_notify_on_runtime_version_initially() {
{
let client = Arc::new(substrate_test_runtime_client::new());
let api = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor())));
let (api, _child) = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor())));
api.subscribe_runtime_version(Default::default(), subscriber);
+6 -5
View File
@@ -1015,7 +1015,7 @@ ServiceBuilder<
let subscriptions = sc_rpc::Subscriptions::new(Arc::new(tasks_builder.spawn_handle()));
let (chain, state) = if let (Some(remote_backend), Some(on_demand)) =
let (chain, state, child_state) = if let (Some(remote_backend), Some(on_demand)) =
(remote_backend.as_ref(), on_demand.as_ref()) {
// Light clients
let chain = sc_rpc::chain::new_light(
@@ -1024,19 +1024,19 @@ ServiceBuilder<
remote_backend.clone(),
on_demand.clone()
);
let state = sc_rpc::state::new_light(
let (state, child_state) = sc_rpc::state::new_light(
client.clone(),
subscriptions.clone(),
remote_backend.clone(),
on_demand.clone()
);
(chain, state)
(chain, state, child_state)
} else {
// Full nodes
let chain = sc_rpc::chain::new_full(client.clone(), subscriptions.clone());
let state = sc_rpc::state::new_full(client.clone(), subscriptions.clone());
(chain, state)
let (state, child_state) = sc_rpc::state::new_full(client.clone(), subscriptions.clone());
(chain, state, child_state)
};
let author = sc_rpc::author::Author::new(
@@ -1059,6 +1059,7 @@ ServiceBuilder<
sc_rpc_server::rpc_handler((
state::StateApi::to_delegate(state),
state::ChildStateApi::to_delegate(child_state),
chain::ChainApi::to_delegate(chain),
maybe_offchain_rpc,
author::AuthorApi::to_delegate(author),
+16 -23
View File
@@ -25,8 +25,8 @@ use parking_lot::{Mutex, RwLock};
use codec::{Encode, Decode};
use hash_db::Prefix;
use sp_core::{
ChangesTrieConfiguration, convert_hash, traits::CodeExecutor,
NativeOrEncoded, storage::{StorageKey, StorageData, well_known_keys, ChildInfo},
ChangesTrieConfiguration, convert_hash, traits::CodeExecutor, NativeOrEncoded,
storage::{StorageKey, PrefixedStorageKey, StorageData, well_known_keys, ChildInfo},
};
use sc_telemetry::{telemetry, SUBSTRATE_INFO};
use sp_runtime::{
@@ -344,7 +344,7 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
last: Block::Hash,
min: Block::Hash,
max: Block::Hash,
storage_key: Option<&StorageKey>,
storage_key: Option<&PrefixedStorageKey>,
key: &StorageKey,
cht_size: NumberFor<Block>,
) -> sp_blockchain::Result<ChangesProof<Block::Header>> {
@@ -393,7 +393,7 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
fn with_cached_changed_keys(
&self,
root: &Block::Hash,
functor: &mut dyn FnMut(&HashMap<Option<Vec<u8>>, HashSet<Vec<u8>>>),
functor: &mut dyn FnMut(&HashMap<Option<PrefixedStorageKey>, HashSet<Vec<u8>>>),
) -> bool {
self.storage.with_cached_changed_keys(root, functor)
}
@@ -438,7 +438,7 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
number: last_number,
},
max_number,
storage_key.as_ref().map(|x| &x.0[..]),
storage_key,
&key.0,
)
.map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?;
@@ -1109,12 +1109,11 @@ impl<B, E, Block, RA> ProofProvider<Block> for Client<B, E, Block, RA> where
fn read_child_proof(
&self,
id: &BlockId<Block>,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
keys: &mut dyn Iterator<Item=&[u8]>,
) -> sp_blockchain::Result<StorageProof> {
self.state_at(id)
.and_then(|state| prove_child_read(state, storage_key, child_info, keys)
.and_then(|state| prove_child_read(state, child_info, keys)
.map_err(Into::into))
}
@@ -1156,7 +1155,7 @@ impl<B, E, Block, RA> ProofProvider<Block> for Client<B, E, Block, RA> where
last: Block::Hash,
min: Block::Hash,
max: Block::Hash,
storage_key: Option<&StorageKey>,
storage_key: Option<&PrefixedStorageKey>,
key: &StorageKey,
) -> sp_blockchain::Result<ChangesProof<Block::Header>> {
self.key_changes_proof_with_cht_size(
@@ -1286,46 +1285,40 @@ impl<B, E, Block, RA> StorageProvider<Block, B> for Client<B, E, Block, RA> wher
)
}
fn child_storage_keys(
&self,
id: &BlockId<Block>,
child_storage_key: &StorageKey,
child_info: ChildInfo,
child_info: &ChildInfo,
key_prefix: &StorageKey
) -> sp_blockchain::Result<Vec<StorageKey>> {
let keys = self.state_at(id)?
.child_keys(&child_storage_key.0, child_info, &key_prefix.0)
.child_keys(child_info, &key_prefix.0)
.into_iter()
.map(StorageKey)
.collect();
Ok(keys)
}
fn child_storage(
&self,
id: &BlockId<Block>,
storage_key: &StorageKey,
child_info: ChildInfo,
child_info: &ChildInfo,
key: &StorageKey
) -> sp_blockchain::Result<Option<StorageData>> {
Ok(self.state_at(id)?
.child_storage(&storage_key.0, child_info, &key.0)
.child_storage(child_info, &key.0)
.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
.map(StorageData))
}
fn child_storage_hash(
&self,
id: &BlockId<Block>,
storage_key: &StorageKey,
child_info: ChildInfo,
child_info: &ChildInfo,
key: &StorageKey
) -> sp_blockchain::Result<Option<Block::Hash>> {
Ok(self.state_at(id)?
.child_storage_hash(&storage_key.0, child_info, &key.0)
.child_storage_hash(child_info, &key.0)
.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
)
}
@@ -1361,7 +1354,7 @@ impl<B, E, Block, RA> StorageProvider<Block, B> for Client<B, E, Block, RA> wher
&self,
first: NumberFor<Block>,
last: BlockId<Block>,
storage_key: Option<&StorageKey>,
storage_key: Option<&PrefixedStorageKey>,
key: &StorageKey
) -> sp_blockchain::Result<Vec<(NumberFor<Block>, u32)>> {
let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?;
@@ -1392,7 +1385,7 @@ impl<B, E, Block, RA> StorageProvider<Block, B> for Client<B, E, Block, RA> wher
range_first,
&range_anchor,
best_number,
storage_key.as_ref().map(|x| &x.0[..]),
storage_key,
&key.0)
.and_then(|r| r.map(|r| r.map(|(block, tx)| (block, tx))).collect::<Result<_, _>>())
.map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?;
+6 -5
View File
@@ -516,9 +516,9 @@ impl<Block: BlockT> backend::BlockImportOperation<Block> for BlockImportOperatio
fn reset_storage(&mut self, storage: Storage) -> sp_blockchain::Result<Block::Hash> {
check_genesis_storage(&storage)?;
let child_delta = storage.children.into_iter()
.map(|(storage_key, child_content)|
(storage_key, child_content.data.into_iter().map(|(k, v)| (k, Some(v))), child_content.child_info));
let child_delta = storage.children_default.into_iter()
.map(|(_storage_key, child_content)|
(child_content.child_info, child_content.data.into_iter().map(|(k, v)| (k, Some(v)))));
let (root, transaction) = self.old_state.full_storage_root(
storage.top.into_iter().map(|(k, v)| (k, Some(v))),
@@ -725,8 +725,9 @@ pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> {
return Err(sp_blockchain::Error::GenesisInvalid.into());
}
if storage.children.keys().any(|child_key| !well_known_keys::is_child_storage_key(&child_key)) {
return Err(sp_blockchain::Error::GenesisInvalid.into());
if storage.children_default.keys()
.any(|child_key| !well_known_keys::is_child_storage_key(&child_key)) {
return Err(sp_blockchain::Error::GenesisInvalid.into());
}
Ok(())
+16 -21
View File
@@ -24,7 +24,7 @@ use parking_lot::RwLock;
use codec::{Decode, Encode};
use sp_core::ChangesTrieConfiguration;
use sp_core::storage::{well_known_keys, ChildInfo, OwnedChildInfo};
use sp_core::storage::{well_known_keys, ChildInfo};
use sp_core::offchain::storage::InMemOffchainStorage;
use sp_state_machine::{
Backend as StateBackend, TrieBackend, InMemoryBackend, ChangesTrieTransaction,
@@ -312,17 +312,17 @@ impl<S, Block> BlockImportOperation<Block> for ImportOperation<Block, S>
self.changes_trie_config_update = Some(changes_trie_config);
// this is only called when genesis block is imported => shouldn't be performance bottleneck
let mut storage: HashMap<Option<(Vec<u8>, OwnedChildInfo)>, _> = HashMap::new();
let mut storage: HashMap<Option<ChildInfo>, _> = HashMap::new();
storage.insert(None, input.top);
// create a list of children keys to re-compute roots for
let child_delta = input.children.iter()
.map(|(storage_key, storage_child)| (storage_key.clone(), None, storage_child.child_info.clone()))
let child_delta = input.children_default.iter()
.map(|(_storage_key, storage_child)| (storage_child.child_info.clone(), None))
.collect::<Vec<_>>();
// make sure to persist the child storage
for (child_key, storage_child) in input.children {
storage.insert(Some((child_key, storage_child.child_info)), storage_child.data);
for (_child_key, storage_child) in input.children_default {
storage.insert(Some(storage_child.child_info), storage_child.data);
}
let storage_update = InMemoryBackend::from(storage);
@@ -386,13 +386,12 @@ impl<H: Hasher> StateBackend<H> for GenesisOrUnavailableState<H>
fn child_storage(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> ClientResult<Option<Vec<u8>>> {
match *self {
GenesisOrUnavailableState::Genesis(ref state) =>
Ok(state.child_storage(storage_key, child_info, key).expect(IN_MEMORY_EXPECT_PROOF)),
Ok(state.child_storage(child_info, key).expect(IN_MEMORY_EXPECT_PROOF)),
GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient),
}
}
@@ -407,13 +406,12 @@ impl<H: Hasher> StateBackend<H> for GenesisOrUnavailableState<H>
fn next_child_storage_key(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<Vec<u8>>, Self::Error> {
match *self {
GenesisOrUnavailableState::Genesis(ref state) => Ok(
state.next_child_storage_key(storage_key, child_info, key)
state.next_child_storage_key(child_info, key)
.expect(IN_MEMORY_EXPECT_PROOF)
),
GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient),
@@ -436,27 +434,25 @@ impl<H: Hasher> StateBackend<H> for GenesisOrUnavailableState<H>
fn for_keys_in_child_storage<A: FnMut(&[u8])>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
action: A,
) {
match *self {
GenesisOrUnavailableState::Genesis(ref state) =>
state.for_keys_in_child_storage(storage_key, child_info, action),
state.for_keys_in_child_storage(child_info, action),
GenesisOrUnavailableState::Unavailable => (),
}
}
fn for_child_keys_with_prefix<A: FnMut(&[u8])>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
prefix: &[u8],
action: A,
) {
match *self {
GenesisOrUnavailableState::Genesis(ref state) =>
state.for_child_keys_with_prefix(storage_key, child_info, prefix, action),
state.for_child_keys_with_prefix(child_info, prefix, action),
GenesisOrUnavailableState::Unavailable => (),
}
}
@@ -474,8 +470,7 @@ impl<H: Hasher> StateBackend<H> for GenesisOrUnavailableState<H>
fn child_storage_root<I>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
delta: I,
) -> (H::Out, bool, Self::Transaction)
where
@@ -483,7 +478,7 @@ impl<H: Hasher> StateBackend<H> for GenesisOrUnavailableState<H>
{
match *self {
GenesisOrUnavailableState::Genesis(ref state) => {
let (root, is_equal, _) = state.child_storage_root(storage_key, child_info, delta);
let (root, is_equal, _) = state.child_storage_root(child_info, delta);
(root, is_equal, Default::default())
},
GenesisOrUnavailableState::Unavailable =>
+14 -14
View File
@@ -23,6 +23,7 @@ use std::marker::PhantomData;
use hash_db::{HashDB, Hasher, EMPTY_PREFIX};
use codec::{Decode, Encode};
use sp_core::{convert_hash, traits::CodeExecutor};
use sp_core::storage::{ChildInfo, ChildType};
use sp_runtime::traits::{
Block as BlockT, Header as HeaderT, Hash, HashFor, NumberFor,
AtLeast32Bit, CheckedConversion,
@@ -135,7 +136,7 @@ impl<E, H, B: BlockT, S: BlockchainStorage<B>> LightDataChecker<E, H, B, S> {
number: request.last_block.0,
},
remote_max_block,
request.storage_key.as_ref().map(Vec::as_slice),
request.storage_key.as_ref(),
&request.key)
.map_err(|err| ClientError::ChangesTrieAccessFailed(err))?;
result.extend(result_range);
@@ -242,10 +243,14 @@ impl<E, Block, H, S> FetchChecker<Block> for LightDataChecker<E, H, Block, S>
request: &RemoteReadChildRequest<Block::Header>,
remote_proof: StorageProof,
) -> ClientResult<HashMap<Vec<u8>, Option<Vec<u8>>>> {
let child_info = match ChildType::from_prefixed_key(&request.storage_key) {
Some((ChildType::ParentKeyId, storage_key)) => ChildInfo::new_default(storage_key),
None => return Err("Invalid child type".into()),
};
read_child_proof_check::<H, _>(
convert_hash(request.header.state_root()),
remote_proof,
&request.storage_key,
&child_info,
request.keys.iter(),
).map_err(Into::into)
}
@@ -360,8 +365,6 @@ pub mod tests {
use sc_client_api::{StorageProvider, ProofProvider};
use sc_block_builder::BlockBuilderProvider;
const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1");
type TestChecker = LightDataChecker<
NativeExecutor<substrate_test_runtime_client::LocalExecutor>,
BlakeTwo256,
@@ -411,11 +414,12 @@ pub mod tests {
fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, Vec<u8>) {
use substrate_test_runtime_client::DefaultTestClientBuilderExt;
use substrate_test_runtime_client::TestClientBuilderExt;
let child_info = ChildInfo::new_default(b"child1");
let child_info = &child_info;
// prepare remote client
let remote_client = substrate_test_runtime_client::TestClientBuilder::new()
.add_extra_child_storage(
b":child_storage:default:child1".to_vec(),
CHILD_INFO_1,
child_info,
b"key1".to_vec(),
b"value1".to_vec(),
).build();
@@ -428,15 +432,13 @@ pub mod tests {
// 'fetch' child read proof from remote node
let child_value = remote_client.child_storage(
&remote_block_id,
&StorageKey(b":child_storage:default:child1".to_vec()),
CHILD_INFO_1,
child_info,
&StorageKey(b"key1".to_vec()),
).unwrap().unwrap().0;
assert_eq!(b"value1"[..], child_value[..]);
let remote_read_proof = remote_client.read_child_proof(
&remote_block_id,
b":child_storage:default:child1",
CHILD_INFO_1,
child_info,
&mut std::iter::once("key1".as_bytes()),
).unwrap();
@@ -510,20 +512,18 @@ pub mod tests {
#[test]
fn storage_child_read_proof_is_generated_and_checked() {
let child_info = ChildInfo::new_default(&b"child1"[..]);
let (
local_checker,
remote_block_header,
remote_read_proof,
result,
) = prepare_for_read_child_proof_check();
let child_infos = CHILD_INFO_1.info();
assert_eq!((&local_checker as &dyn FetchChecker<Block>).check_read_child_proof(
&RemoteReadChildRequest::<Header> {
block: remote_block_header.hash(),
header: remote_block_header,
storage_key: b":child_storage:default:child1".to_vec(),
child_info: child_infos.0.to_vec(),
child_type: child_infos.1,
storage_key: child_info.prefixed_storage_key(),
keys: vec![b"key1".to_vec()],
retry_count: None,
},
+6 -7
View File
@@ -128,7 +128,7 @@ impl<T: Trait> AccountDb<T> for DirectAccountDb {
trie_id: Option<&TrieId>,
location: &StorageKey
) -> Option<Vec<u8>> {
trie_id.and_then(|id| child::get_raw(id, crate::trie_unique_id(&id[..]), &blake2_256(location)))
trie_id.and_then(|id| child::get_raw(&crate::child_trie_info(&id[..]), &blake2_256(location)))
}
fn get_code_hash(&self, account: &T::AccountId) -> Option<CodeHash<T>> {
<ContractInfoOf<T>>::get(account).and_then(|i| i.as_alive().map(|i| i.code_hash))
@@ -167,13 +167,13 @@ impl<T: Trait> AccountDb<T> for DirectAccountDb {
(false, Some(info), _) => info,
// Existing contract is being removed.
(true, Some(info), None) => {
child::kill_storage(&info.trie_id, info.child_trie_unique_id());
child::kill_storage(&info.child_trie_info());
<ContractInfoOf<T>>::remove(&address);
continue;
}
// Existing contract is being replaced by a new one.
(true, Some(info), Some(code_hash)) => {
child::kill_storage(&info.trie_id, info.child_trie_unique_id());
child::kill_storage(&info.child_trie_info());
AliveContractInfo::<T> {
code_hash,
storage_size: T::StorageSizeOffset::get(),
@@ -212,17 +212,16 @@ impl<T: Trait> AccountDb<T> for DirectAccountDb {
for (k, v) in changed.storage.into_iter() {
if let Some(value) = child::get_raw(
&new_info.trie_id[..],
new_info.child_trie_unique_id(),
&new_info.child_trie_info(),
&blake2_256(&k),
) {
new_info.storage_size -= value.len() as u32;
}
if let Some(value) = v {
new_info.storage_size += value.len() as u32;
child::put_raw(&new_info.trie_id[..], new_info.child_trie_unique_id(), &blake2_256(&k), &value[..]);
child::put_raw(&new_info.child_trie_info(), &blake2_256(&k), &value[..]);
} else {
child::kill(&new_info.trie_id[..], new_info.child_trie_unique_id(), &blake2_256(&k));
child::kill(&new_info.child_trie_info(), &blake2_256(&k));
}
}
+12 -27
View File
@@ -125,12 +125,11 @@ use sp_runtime::{
use frame_support::dispatch::{DispatchResult, Dispatchable};
use frame_support::weights::{SimpleDispatchInfo, MINIMUM_WEIGHT};
use frame_support::{
Parameter, decl_module, decl_event, decl_storage, decl_error, storage::child,
parameter_types, IsSubType,
Parameter, decl_module, decl_event, decl_storage, decl_error,
parameter_types, IsSubType, storage::child::{self, ChildInfo},
};
use frame_support::traits::{OnUnbalanced, Currency, Get, Time, Randomness};
use frame_system::{self as system, ensure_signed, RawOrigin, ensure_root};
use sp_core::storage::well_known_keys::CHILD_STORAGE_KEY_PREFIX;
use pallet_contracts_primitives::{RentProjection, ContractAccessError};
pub type CodeHash<T> = <T as frame_system::Trait>::Hash;
@@ -229,15 +228,14 @@ pub struct RawAliveContractInfo<CodeHash, Balance, BlockNumber> {
impl<CodeHash, Balance, BlockNumber> RawAliveContractInfo<CodeHash, Balance, BlockNumber> {
/// Associated child trie unique id is built from the hash part of the trie id.
pub fn child_trie_unique_id(&self) -> child::ChildInfo {
trie_unique_id(&self.trie_id[..])
pub fn child_trie_info(&self) -> ChildInfo {
child_trie_info(&self.trie_id[..])
}
}
/// Associated child trie unique id is built from the hash part of the trie id.
pub(crate) fn trie_unique_id(trie_id: &[u8]) -> child::ChildInfo {
let start = CHILD_STORAGE_KEY_PREFIX.len() + b"default:".len();
child::ChildInfo::new_default(&trie_id[start ..])
pub(crate) fn child_trie_info(trie_id: &[u8]) -> ChildInfo {
ChildInfo::new_default(trie_id)
}
pub type TombstoneContractInfo<T> =
@@ -270,10 +268,6 @@ pub trait TrieIdGenerator<AccountId> {
///
/// The implementation must ensure every new trie id is unique: two consecutive calls with the
/// same parameter needs to return different trie id values.
///
/// Also, the implementation is responsible for ensuring that `TrieId` starts with
/// `:child_storage:`.
/// TODO: We want to change this, see https://github.com/paritytech/substrate/issues/2325
fn trie_id(account_id: &AccountId) -> TrieId;
}
@@ -297,13 +291,7 @@ where
let mut buf = Vec::new();
buf.extend_from_slice(account_id.as_ref());
buf.extend_from_slice(&new_seed.to_le_bytes()[..]);
// TODO: see https://github.com/paritytech/substrate/issues/2325
CHILD_STORAGE_KEY_PREFIX.iter()
.chain(b"default:")
.chain(T::Hashing::hash(&buf[..]).as_ref().iter())
.cloned()
.collect()
T::Hashing::hash(&buf[..]).as_ref().into()
}
}
@@ -824,13 +812,11 @@ impl<T: Trait> Module<T> {
let key_values_taken = delta.iter()
.filter_map(|key| {
child::get_raw(
&origin_contract.trie_id,
origin_contract.child_trie_unique_id(),
&origin_contract.child_trie_info(),
&blake2_256(key),
).map(|value| {
child::kill(
&origin_contract.trie_id,
origin_contract.child_trie_unique_id(),
&origin_contract.child_trie_info(),
&blake2_256(key),
);
@@ -842,8 +828,8 @@ impl<T: Trait> Module<T> {
let tombstone = <TombstoneContractInfo<T>>::new(
// This operation is cheap enough because last_write (delta not included)
// is not this block as it has been checked earlier.
&child::child_root(
&origin_contract.trie_id,
&child::root(
&origin_contract.child_trie_info(),
)[..],
code_hash,
);
@@ -851,8 +837,7 @@ impl<T: Trait> Module<T> {
if tombstone != dest_tombstone {
for (key, value) in key_values_taken {
child::put_raw(
&origin_contract.trie_id,
origin_contract.child_trie_unique_id(),
&origin_contract.child_trie_info(),
&blake2_256(key),
&value,
);
+5 -5
View File
@@ -223,8 +223,7 @@ fn enact_verdict<T: Trait>(
Verdict::Kill => {
<ContractInfoOf<T>>::remove(account);
child::kill_storage(
&alive_contract_info.trie_id,
alive_contract_info.child_trie_unique_id(),
&alive_contract_info.child_trie_info(),
);
<Module<T>>::deposit_event(RawEvent::Evicted(account.clone(), false));
None
@@ -235,7 +234,9 @@ fn enact_verdict<T: Trait>(
}
// Note: this operation is heavy.
let child_storage_root = child::child_root(&alive_contract_info.trie_id);
let child_storage_root = child::root(
&alive_contract_info.child_trie_info(),
);
let tombstone = <TombstoneContractInfo<T>>::new(
&child_storage_root[..],
@@ -245,8 +246,7 @@ fn enact_verdict<T: Trait>(
<ContractInfoOf<T>>::insert(account, &tombstone_info);
child::kill_storage(
&alive_contract_info.trie_id,
alive_contract_info.child_trie_unique_id(),
&alive_contract_info.child_trie_info(),
);
<Module<T>>::deposit_event(RawEvent::Evicted(account.clone(), true));
-3
View File
@@ -200,10 +200,7 @@ impl TrieIdGenerator<u64> for DummyTrieIdGenerator {
*v
});
// TODO: see https://github.com/paritytech/substrate/issues/2325
let mut res = vec![];
res.extend_from_slice(well_known_keys::CHILD_STORAGE_KEY_PREFIX);
res.extend_from_slice(b"default:");
res.extend_from_slice(&new_seed.to_le_bytes());
res.extend_from_slice(&account_id.to_le_bytes());
res
+89 -106
View File
@@ -16,100 +16,90 @@
//! Operation on runtime child storages.
//!
//! This module is a currently only a variant of unhashed with additional `storage_key`.
//! Note that `storage_key` must be unique and strong (strong in the sense of being long enough to
//! avoid collision from a resistant hash function (which unique implies)).
//!
//! A **key collision free** unique id is required as parameter to avoid key collision
//! between child tries.
//! This unique id management and generation responsibility is delegated to pallet module.
// NOTE: could replace unhashed by having only one kind of storage (root being null storage key (storage_key can become Option<&[u8]>).
//! This module is a currently only a variant of unhashed with additional `child_info`.
// NOTE: could replace unhashed by having only one kind of storage (top trie being the child info
// of null length parent storage key).
use crate::sp_std::prelude::*;
use codec::{Codec, Encode, Decode};
pub use sp_core::storage::ChildInfo;
pub use sp_core::storage::{ChildInfo, ChildType};
/// Return the value of the item in storage under `key`, or `None` if there is no explicit entry.
pub fn get<T: Decode + Sized>(
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Option<T> {
let (data, child_type) = child_info.info();
sp_io::storage::child_get(
storage_key,
data,
child_type,
key,
).and_then(|v| {
Decode::decode(&mut &v[..]).map(Some).unwrap_or_else(|_| {
// TODO #3700: error should be handleable.
runtime_print!("ERROR: Corrupted state in child trie at {:?}/{:?}", storage_key, key);
None
})
})
match child_info.child_type() {
ChildType::ParentKeyId => {
let storage_key = child_info.storage_key();
sp_io::default_child_storage::get(
storage_key,
key,
).and_then(|v| {
Decode::decode(&mut &v[..]).map(Some).unwrap_or_else(|_| {
// TODO #3700: error should be handleable.
runtime_print!("ERROR: Corrupted state in child trie at {:?}/{:?}", storage_key, key);
None
})
})
},
}
}
/// Return the value of the item in storage under `key`, or the type's default if there is no
/// explicit entry.
pub fn get_or_default<T: Decode + Sized + Default>(
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> T {
get(storage_key, child_info, key).unwrap_or_else(Default::default)
get(child_info, key).unwrap_or_else(Default::default)
}
/// Return the value of the item in storage under `key`, or `default_value` if there is no
/// explicit entry.
pub fn get_or<T: Decode + Sized>(
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
default_value: T,
) -> T {
get(storage_key, child_info, key).unwrap_or(default_value)
get(child_info, key).unwrap_or(default_value)
}
/// Return the value of the item in storage under `key`, or `default_value()` if there is no
/// explicit entry.
pub fn get_or_else<T: Decode + Sized, F: FnOnce() -> T>(
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
default_value: F,
) -> T {
get(storage_key, child_info, key).unwrap_or_else(default_value)
get(child_info, key).unwrap_or_else(default_value)
}
/// Put `value` in storage under `key`.
pub fn put<T: Encode>(
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
value: &T,
) {
let (data, child_type) = child_info.info();
value.using_encoded(|slice|
sp_io::storage::child_set(
storage_key,
data,
child_type,
key,
slice,
)
);
match child_info.child_type() {
ChildType::ParentKeyId => value.using_encoded(|slice|
sp_io::default_child_storage::set(
child_info.storage_key(),
key,
slice,
)
),
}
}
/// Remove `key` from storage, returning its value if it had an explicit entry or `None` otherwise.
pub fn take<T: Decode + Sized>(
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Option<T> {
let r = get(storage_key, child_info, key);
let r = get(child_info, key);
if r.is_some() {
kill(storage_key, child_info, key);
kill(child_info, key);
}
r
}
@@ -117,113 +107,106 @@ pub fn take<T: Decode + Sized>(
/// Remove `key` from storage, returning its value, or, if there was no explicit entry in storage,
/// the default for its type.
pub fn take_or_default<T: Codec + Sized + Default>(
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> T {
take(storage_key, child_info, key).unwrap_or_else(Default::default)
take(child_info, key).unwrap_or_else(Default::default)
}
/// Return the value of the item in storage under `key`, or `default_value` if there is no
/// explicit entry. Ensure there is no explicit entry on return.
pub fn take_or<T: Codec + Sized>(
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
default_value: T,
) -> T {
take(storage_key, child_info, key).unwrap_or(default_value)
take(child_info, key).unwrap_or(default_value)
}
/// Return the value of the item in storage under `key`, or `default_value()` if there is no
/// explicit entry. Ensure there is no explicit entry on return.
pub fn take_or_else<T: Codec + Sized, F: FnOnce() -> T>(
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
default_value: F,
) -> T {
take(storage_key, child_info, key).unwrap_or_else(default_value)
take(child_info, key).unwrap_or_else(default_value)
}
/// Check to see if `key` has an explicit entry in storage.
pub fn exists(
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> bool {
let (data, child_type) = child_info.info();
sp_io::storage::child_read(
storage_key, data, child_type,
key, &mut [0;0][..], 0,
).is_some()
match child_info.child_type() {
ChildType::ParentKeyId => sp_io::default_child_storage::read(
child_info.storage_key(),
key, &mut [0;0][..], 0,
).is_some(),
}
}
/// Remove all `storage_key` key/values
pub fn kill_storage(
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
) {
let (data, child_type) = child_info.info();
sp_io::storage::child_storage_kill(
storage_key,
data,
child_type,
)
match child_info.child_type() {
ChildType::ParentKeyId => sp_io::default_child_storage::storage_kill(
child_info.storage_key(),
),
}
}
/// Ensure `key` has no explicit entry in storage.
pub fn kill(
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) {
let (data, child_type) = child_info.info();
sp_io::storage::child_clear(
storage_key,
data,
child_type,
key,
);
match child_info.child_type() {
ChildType::ParentKeyId => {
sp_io::default_child_storage::clear(
child_info.storage_key(),
key,
);
},
}
}
/// Get a Vec of bytes from storage.
pub fn get_raw(
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Option<Vec<u8>> {
let (data, child_type) = child_info.info();
sp_io::storage::child_get(
storage_key,
data,
child_type,
key,
)
match child_info.child_type() {
ChildType::ParentKeyId => sp_io::default_child_storage::get(
child_info.storage_key(),
key,
),
}
}
/// Put a raw byte slice into storage.
pub fn put_raw(
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
value: &[u8],
) {
let (data, child_type) = child_info.info();
sp_io::storage::child_set(
storage_key,
data,
child_type,
key,
value,
)
match child_info.child_type() {
ChildType::ParentKeyId => sp_io::default_child_storage::set(
child_info.storage_key(),
key,
value,
),
}
}
/// Calculate current child root value.
pub fn child_root(
storage_key: &[u8],
pub fn root(
child_info: &ChildInfo,
) -> Vec<u8> {
sp_io::storage::child_root(
storage_key,
)
match child_info.child_type() {
ChildType::ParentKeyId => sp_io::default_child_storage::root(
child_info.storage_key(),
),
}
}
@@ -301,7 +301,7 @@ fn new_test_ext() -> sp_io::TestExternalities {
fn storage_instance_independence() {
let mut storage = sp_core::storage::Storage {
top: std::collections::BTreeMap::new(),
children: std::collections::HashMap::new()
children_default: std::collections::HashMap::new()
};
sp_state_machine::BasicExternalities::execute_with_storage(&mut storage, || {
module2::Value::<Runtime>::put(0);
+1 -1
View File
@@ -930,7 +930,7 @@ impl<T: Trait> Module<T> {
<Number<T>>::hashed_key().to_vec() => T::BlockNumber::one().encode(),
<ParentHash<T>>::hashed_key().to_vec() => [69u8; 32].encode()
],
children: map![],
children_default: map![],
})
}
+14 -22
View File
@@ -24,7 +24,7 @@
use std::any::{Any, TypeId};
use sp_storage::{ChildStorageKey, ChildInfo};
use sp_storage::ChildInfo;
pub use scope_limited::{set_and_run_with_externalities, with_externalities};
pub use extensions::{Extension, Extensions, ExtensionStore};
@@ -62,8 +62,7 @@ pub trait Externalities: ExtensionStore {
/// Returns an `Option` that holds the SCALE encoded hash.
fn child_storage_hash(
&self,
storage_key: ChildStorageKey,
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Option<Vec<u8>>;
@@ -72,8 +71,7 @@ pub trait Externalities: ExtensionStore {
/// Returns an `Option` that holds the SCALE encoded hash.
fn child_storage(
&self,
storage_key: ChildStorageKey,
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Option<Vec<u8>>;
@@ -85,12 +83,11 @@ pub trait Externalities: ExtensionStore {
/// Set child storage entry `key` of current contract being called (effective immediately).
fn set_child_storage(
&mut self,
storage_key: ChildStorageKey,
child_info: ChildInfo,
child_info: &ChildInfo,
key: Vec<u8>,
value: Vec<u8>,
) {
self.place_child_storage(storage_key, child_info, key, Some(value))
self.place_child_storage(child_info, key, Some(value))
}
/// Clear a storage entry (`key`) of current contract being called (effective immediately).
@@ -101,11 +98,10 @@ pub trait Externalities: ExtensionStore {
/// Clear a child storage entry (`key`) of current contract being called (effective immediately).
fn clear_child_storage(
&mut self,
storage_key: ChildStorageKey,
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) {
self.place_child_storage(storage_key, child_info, key.to_vec(), None)
self.place_child_storage(child_info, key.to_vec(), None)
}
/// Whether a storage entry exists.
@@ -116,11 +112,10 @@ pub trait Externalities: ExtensionStore {
/// Whether a child storage entry exists.
fn exists_child_storage(
&self,
storage_key: ChildStorageKey,
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> bool {
self.child_storage(storage_key, child_info, key).is_some()
self.child_storage(child_info, key).is_some()
}
/// Returns the key immediately following the given key, if it exists.
@@ -129,13 +124,12 @@ pub trait Externalities: ExtensionStore {
/// Returns the key immediately following the given key, if it exists, in child storage.
fn next_child_storage_key(
&self,
storage_key: ChildStorageKey,
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Option<Vec<u8>>;
/// Clear an entire child storage.
fn kill_child_storage(&mut self, storage_key: ChildStorageKey, child_info: ChildInfo);
fn kill_child_storage(&mut self, child_info: &ChildInfo);
/// Clear storage entries which keys are start with the given prefix.
fn clear_prefix(&mut self, prefix: &[u8]);
@@ -143,8 +137,7 @@ pub trait Externalities: ExtensionStore {
/// Clear child storage entries which keys are start with the given prefix.
fn clear_child_prefix(
&mut self,
storage_key: ChildStorageKey,
child_info: ChildInfo,
child_info: &ChildInfo,
prefix: &[u8],
);
@@ -154,8 +147,7 @@ pub trait Externalities: ExtensionStore {
/// Set or clear a child storage entry.
fn place_child_storage(
&mut self,
storage_key: ChildStorageKey,
child_info: ChildInfo,
child_info: &ChildInfo,
key: Vec<u8>,
value: Option<Vec<u8>>,
);
@@ -178,7 +170,7 @@ pub trait Externalities: ExtensionStore {
/// storage map will be removed.
fn child_storage_root(
&mut self,
storage_key: ChildStorageKey,
child_info: &ChildInfo,
) -> Vec<u8>;
/// Get the changes trie root of the current storage overlay at a block with given `parent`.
+132 -175
View File
@@ -37,7 +37,7 @@ use sp_core::{
traits::{KeystoreExt, CallInWasmExt, TaskExecutorExt},
offchain::{OffchainExt, TransactionPoolExt},
hexdisplay::HexDisplay,
storage::{ChildStorageKey, ChildInfo},
storage::ChildInfo,
};
use sp_core::{
@@ -74,19 +74,6 @@ pub enum EcdsaVerifyError {
BadSignature,
}
/// Returns a `ChildStorageKey` if the given `storage_key` slice is a valid storage
/// key or panics otherwise.
///
/// Panicking here is aligned with what the `without_std` environment would do
/// in the case of an invalid child storage key.
#[cfg(feature = "std")]
fn child_storage_key_or_panic(storage_key: &[u8]) -> ChildStorageKey {
match ChildStorageKey::from_slice(storage_key) {
Some(storage_key) => storage_key,
None => panic!("child storage key is invalid"),
}
}
/// Interface for accessing the storage from within the runtime.
#[runtime_interface]
pub trait Storage {
@@ -95,30 +82,6 @@ pub trait Storage {
self.storage(key).map(|s| s.to_vec())
}
/// All Child api uses :
/// - A `child_storage_key` to define the anchor point for the child proof
/// (commonly the location where the child root is stored in its parent trie).
/// - A `child_storage_types` to identify the kind of the child type and how its
/// `child definition` parameter is encoded.
/// - A `child_definition_parameter` which is the additional information required
/// to use the child trie. For instance defaults child tries requires this to
/// contain a collision free unique id.
///
/// This function specifically returns the data for `key` in the child storage or `None`
/// if the key can not be found.
fn child_get(
&self,
child_storage_key: &[u8],
child_definition: &[u8],
child_type: u32,
key: &[u8],
) -> Option<Vec<u8>> {
let storage_key = child_storage_key_or_panic(child_storage_key);
let child_info = ChildInfo::resolve_child_info(child_type, child_definition)
.expect("Invalid child definition");
self.child_storage(storage_key, child_info, key).map(|s| s.to_vec())
}
/// Get `key` from storage, placing the value into `value_out` and return the number of
/// bytes that the entry in storage has beyond the offset or `None` if the storage entry
/// doesn't exist at all.
@@ -134,135 +97,26 @@ pub trait Storage {
})
}
/// Get `key` from child storage, placing the value into `value_out` and return the number
/// of bytes that the entry in storage has beyond the offset or `None` if the storage entry
/// doesn't exist at all.
/// If `value_out` length is smaller than the returned length, only `value_out` length bytes
/// are copied into `value_out`.
///
/// See `child_get` for common child api parameters.
fn child_read(
&self,
child_storage_key: &[u8],
child_definition: &[u8],
child_type: u32,
key: &[u8],
value_out: &mut [u8],
value_offset: u32,
) -> Option<u32> {
let storage_key = child_storage_key_or_panic(child_storage_key);
let child_info = ChildInfo::resolve_child_info(child_type, child_definition)
.expect("Invalid child definition");
self.child_storage(storage_key, child_info, key)
.map(|value| {
let value_offset = value_offset as usize;
let data = &value[value_offset.min(value.len())..];
let written = std::cmp::min(data.len(), value_out.len());
value_out[..written].copy_from_slice(&data[..written]);
value.len() as u32
})
}
/// Set `key` to `value` in the storage.
fn set(&mut self, key: &[u8], value: &[u8]) {
self.set_storage(key.to_vec(), value.to_vec());
}
/// Set `key` to `value` in the child storage denoted by `child_storage_key`.
///
/// See `child_get` for common child api parameters.
fn child_set(
&mut self,
child_storage_key: &[u8],
child_definition: &[u8],
child_type: u32,
key: &[u8],
value: &[u8],
) {
let storage_key = child_storage_key_or_panic(child_storage_key);
let child_info = ChildInfo::resolve_child_info(child_type, child_definition)
.expect("Invalid child definition");
self.set_child_storage(storage_key, child_info, key.to_vec(), value.to_vec());
}
/// Clear the storage of the given `key` and its value.
fn clear(&mut self, key: &[u8]) {
self.clear_storage(key)
}
/// Clear the given child storage of the given `key` and its value.
///
/// See `child_get` for common child api parameters.
fn child_clear(
&mut self,
child_storage_key: &[u8],
child_definition: &[u8],
child_type: u32,
key: &[u8],
) {
let storage_key = child_storage_key_or_panic(child_storage_key);
let child_info = ChildInfo::resolve_child_info(child_type, child_definition)
.expect("Invalid child definition");
self.clear_child_storage(storage_key, child_info, key);
}
/// Clear an entire child storage.
///
/// See `child_get` for common child api parameters.
fn child_storage_kill(
&mut self,
child_storage_key: &[u8],
child_definition: &[u8],
child_type: u32,
) {
let storage_key = child_storage_key_or_panic(child_storage_key);
let child_info = ChildInfo::resolve_child_info(child_type, child_definition)
.expect("Invalid child definition");
self.kill_child_storage(storage_key, child_info);
}
/// Check whether the given `key` exists in storage.
fn exists(&self, key: &[u8]) -> bool {
self.exists_storage(key)
}
/// Check whether the given `key` exists in storage.
///
/// See `child_get` for common child api parameters.
fn child_exists(
&self,
child_storage_key: &[u8],
child_definition: &[u8],
child_type: u32,
key: &[u8],
) -> bool {
let storage_key = child_storage_key_or_panic(child_storage_key);
let child_info = ChildInfo::resolve_child_info(child_type, child_definition)
.expect("Invalid child definition");
self.exists_child_storage(storage_key, child_info, key)
}
/// Clear the storage of each key-value pair where the key starts with the given `prefix`.
fn clear_prefix(&mut self, prefix: &[u8]) {
Externalities::clear_prefix(*self, prefix)
}
/// Clear the child storage of each key-value pair where the key starts with the given `prefix`.
///
/// See `child_get` for common child api parameters.
fn child_clear_prefix(
&mut self,
child_storage_key: &[u8],
child_definition: &[u8],
child_type: u32,
prefix: &[u8],
) {
let storage_key = child_storage_key_or_panic(child_storage_key);
let child_info = ChildInfo::resolve_child_info(child_type, child_definition)
.expect("Invalid child definition");
self.clear_child_prefix(storage_key, child_info, prefix);
}
/// "Commit" all existing operations and compute the resulting storage root.
///
/// The hashing algorithm is defined by the `Block`.
@@ -272,21 +126,6 @@ pub trait Storage {
self.storage_root()
}
/// "Commit" all existing operations and compute the resulting child storage root.
///
/// The hashing algorithm is defined by the `Block`.
///
/// Returns the SCALE encoded hash.
///
/// See `child_get` for common child api parameters.
fn child_root(
&mut self,
child_storage_key: &[u8],
) -> Vec<u8> {
let storage_key = child_storage_key_or_panic(child_storage_key);
self.child_storage_root(storage_key)
}
/// "Commit" all existing operations and get the resulting storage change root.
/// `parent_hash` is a SCALE encoded hash.
///
@@ -303,19 +142,136 @@ pub trait Storage {
fn next_key(&mut self, key: &[u8]) -> Option<Vec<u8>> {
self.next_storage_key(&key)
}
}
/// Get the next key in storage after the given one in lexicographic order in child storage.
fn child_next_key(
&mut self,
child_storage_key: &[u8],
child_definition: &[u8],
child_type: u32,
/// Interface for accessing the child storage for default child trie,
/// from within the runtime.
#[runtime_interface]
pub trait DefaultChildStorage {
/// Get a default child storage value for a given key.
///
/// Parameter `storage_key` is the unprefixed location of the root of the child trie in the parent trie.
/// Result is `None` if the value for `key` in the child storage can not be found.
fn get(
&self,
storage_key: &[u8],
key: &[u8],
) -> Option<Vec<u8>> {
let storage_key = child_storage_key_or_panic(child_storage_key);
let child_info = ChildInfo::resolve_child_info(child_type, child_definition)
.expect("Invalid child definition");
self.next_child_storage_key(storage_key, child_info, key)
let child_info = ChildInfo::new_default(storage_key);
self.child_storage(&child_info, key).map(|s| s.to_vec())
}
/// Allocation efficient variant of `get`.
///
/// Get `key` from child storage, placing the value into `value_out` and return the number
/// of bytes that the entry in storage has beyond the offset or `None` if the storage entry
/// doesn't exist at all.
/// If `value_out` length is smaller than the returned length, only `value_out` length bytes
/// are copied into `value_out`.
fn read(
&self,
storage_key: &[u8],
key: &[u8],
value_out: &mut [u8],
value_offset: u32,
) -> Option<u32> {
let child_info = ChildInfo::new_default(storage_key);
self.child_storage(&child_info, key)
.map(|value| {
let value_offset = value_offset as usize;
let data = &value[value_offset.min(value.len())..];
let written = std::cmp::min(data.len(), value_out.len());
value_out[..written].copy_from_slice(&data[..written]);
value.len() as u32
})
}
/// Set a child storage value.
///
/// Set `key` to `value` in the child storage denoted by `storage_key`.
fn set(
&mut self,
storage_key: &[u8],
key: &[u8],
value: &[u8],
) {
let child_info = ChildInfo::new_default(storage_key);
self.set_child_storage(&child_info, key.to_vec(), value.to_vec());
}
/// Clear a child storage key.
///
/// For the default child storage at `storage_key`, clear value at `key`.
fn clear (
&mut self,
storage_key: &[u8],
key: &[u8],
) {
let child_info = ChildInfo::new_default(storage_key);
self.clear_child_storage(&child_info, key);
}
/// Clear an entire child storage.
///
/// If it exists, the child storage for `storage_key`
/// is removed.
fn storage_kill(
&mut self,
storage_key: &[u8],
) {
let child_info = ChildInfo::new_default(storage_key);
self.kill_child_storage(&child_info);
}
/// Check a child storage key.
///
/// Check whether the given `key` exists in default child defined at `storage_key`.
fn exists(
&self,
storage_key: &[u8],
key: &[u8],
) -> bool {
let child_info = ChildInfo::new_default(storage_key);
self.exists_child_storage(&child_info, key)
}
/// Clear child default key by prefix.
///
/// Clear the child storage of each key-value pair where the key starts with the given `prefix`.
fn clear_prefix(
&mut self,
storage_key: &[u8],
prefix: &[u8],
) {
let child_info = ChildInfo::new_default(storage_key);
self.clear_child_prefix(&child_info, prefix);
}
/// Default child root calculation.
///
/// "Commit" all existing operations and compute the resulting child storage root.
/// The hashing algorithm is defined by the `Block`.
///
/// Returns the SCALE encoded hash.
fn root(
&mut self,
storage_key: &[u8],
) -> Vec<u8> {
let child_info = ChildInfo::new_default(storage_key);
self.child_storage_root(&child_info)
}
/// Child storage key iteration.
///
/// Get the next key in storage after the given one in lexicographic order in child storage.
fn next_key(
&mut self,
storage_key: &[u8],
key: &[u8],
) -> Option<Vec<u8>> {
let child_info = ChildInfo::new_default(storage_key);
self.next_child_storage_key(&child_info, key)
}
}
@@ -1019,6 +975,7 @@ pub type TestExternalities = sp_state_machine::TestExternalities<sp_core::Blake2
#[cfg(feature = "std")]
pub type SubstrateHostFunctions = (
storage::HostFunctions,
default_child_storage::HostFunctions,
misc::HostFunctions,
offchain::HostFunctions,
crypto::HostFunctions,
@@ -1050,7 +1007,7 @@ mod tests {
t = BasicExternalities::new(Storage {
top: map![b"foo".to_vec() => b"bar".to_vec()],
children: map![],
children_default: map![],
});
t.execute_with(|| {
@@ -1063,7 +1020,7 @@ mod tests {
fn read_storage_works() {
let mut t = BasicExternalities::new(Storage {
top: map![b":test".to_vec() => b"\x0b\0\0\0Hello world".to_vec()],
children: map![],
children_default: map![],
});
t.execute_with(|| {
@@ -1085,7 +1042,7 @@ mod tests {
b":abc".to_vec() => b"\x0b\0\0\0Hello world".to_vec(),
b":abdd".to_vec() => b"\x0b\0\0\0Hello world".to_vec()
],
children: map![],
children_default: map![],
});
t.execute_with(|| {
+4 -4
View File
@@ -136,15 +136,15 @@ impl BuildStorage for sp_core::storage::Storage {
storage: &mut sp_core::storage::Storage,
)-> Result<(), String> {
storage.top.extend(self.top.iter().map(|(k, v)| (k.clone(), v.clone())));
for (k, other_map) in self.children.iter() {
for (k, other_map) in self.children_default.iter() {
let k = k.clone();
if let Some(map) = storage.children.get_mut(&k) {
if let Some(map) = storage.children_default.get_mut(&k) {
map.data.extend(other_map.data.iter().map(|(k, v)| (k.clone(), v.clone())));
if !map.child_info.try_update(other_map.child_info.as_ref()) {
if !map.child_info.try_update(&other_map.child_info) {
return Err("Incompatible child info update".to_string());
}
} else {
storage.children.insert(k, other_map.clone());
storage.children_default.insert(k, other_map.clone());
}
}
Ok(())
@@ -20,7 +20,7 @@ use log::warn;
use hash_db::Hasher;
use codec::{Decode, Encode};
use sp_core::{traits::RuntimeCode, storage::{ChildInfo, OwnedChildInfo, well_known_keys}};
use sp_core::{traits::RuntimeCode, storage::{ChildInfo, well_known_keys}};
use sp_trie::{TrieMut, MemoryDB, trie_types::TrieDBMut};
use crate::{
@@ -54,19 +54,17 @@ pub trait Backend<H: Hasher>: std::fmt::Debug {
/// Get keyed child storage or None if there is nothing associated.
fn child_storage(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<StorageValue>, Self::Error>;
/// Get child keyed storage value hash or None if there is nothing associated.
fn child_storage_hash(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<H::Out>, Self::Error> {
self.child_storage(storage_key, child_info, key).map(|v| v.map(|v| H::hash(&v)))
self.child_storage(child_info, key).map(|v| v.map(|v| H::hash(&v)))
}
/// true if a key exists in storage.
@@ -77,11 +75,10 @@ pub trait Backend<H: Hasher>: std::fmt::Debug {
/// true if a key exists in child storage.
fn exists_child_storage(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<bool, Self::Error> {
Ok(self.child_storage(storage_key, child_info, key)?.is_some())
Ok(self.child_storage(child_info, key)?.is_some())
}
/// Return the next key in storage in lexicographic order or `None` if there is no value.
@@ -90,16 +87,14 @@ pub trait Backend<H: Hasher>: std::fmt::Debug {
/// Return the next key in child storage in lexicographic order or `None` if there is no value.
fn next_child_storage_key(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8]
) -> Result<Option<StorageKey>, Self::Error>;
/// Retrieve all entries keys of child storage and call `f` for each of those keys.
fn for_keys_in_child_storage<F: FnMut(&[u8])>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
f: F,
);
@@ -118,8 +113,7 @@ pub trait Backend<H: Hasher>: std::fmt::Debug {
/// call `f` for each of those keys.
fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
prefix: &[u8],
f: F,
);
@@ -137,8 +131,7 @@ pub trait Backend<H: Hasher>: std::fmt::Debug {
/// is true if child storage root equals default storage root.
fn child_storage_root<I>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
delta: I,
) -> (H::Out, bool, Self::Transaction)
where
@@ -158,12 +151,11 @@ pub trait Backend<H: Hasher>: std::fmt::Debug {
/// Get all keys of child storage with given prefix
fn child_keys(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
prefix: &[u8],
) -> Vec<StorageKey> {
let mut all = Vec::new();
self.for_child_keys_with_prefix(storage_key, child_info, prefix, |k| all.push(k.to_vec()));
self.for_child_keys_with_prefix(child_info, prefix, |k| all.push(k.to_vec()));
all
}
@@ -183,20 +175,21 @@ pub trait Backend<H: Hasher>: std::fmt::Debug {
where
I1: IntoIterator<Item=(StorageKey, Option<StorageValue>)>,
I2i: IntoIterator<Item=(StorageKey, Option<StorageValue>)>,
I2: IntoIterator<Item=(StorageKey, I2i, OwnedChildInfo)>,
I2: IntoIterator<Item=(ChildInfo, I2i)>,
H::Out: Ord + Encode,
{
let mut txs: Self::Transaction = Default::default();
let mut child_roots: Vec<_> = Default::default();
// child first
for (storage_key, child_delta, child_info) in child_deltas {
for (child_info, child_delta) in child_deltas {
let (child_root, empty, child_txs) =
self.child_storage_root(&storage_key[..], child_info.as_ref(), child_delta);
self.child_storage_root(&child_info, child_delta);
let prefixed_storage_key = child_info.prefixed_storage_key();
txs.consolidate(child_txs);
if empty {
child_roots.push((storage_key, None));
child_roots.push((prefixed_storage_key.into_inner(), None));
} else {
child_roots.push((storage_key, Some(child_root.encode())));
child_roots.push((prefixed_storage_key.into_inner(), Some(child_root.encode())));
}
}
let (root, parent_txs) = self.storage_root(
@@ -239,20 +232,18 @@ impl<'a, T: Backend<H>, H: Hasher> Backend<H> for &'a T {
fn child_storage(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<StorageKey>, Self::Error> {
(*self).child_storage(storage_key, child_info, key)
(*self).child_storage(child_info, key)
}
fn for_keys_in_child_storage<F: FnMut(&[u8])>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
f: F,
) {
(*self).for_keys_in_child_storage(storage_key, child_info, f)
(*self).for_keys_in_child_storage(child_info, f)
}
fn next_storage_key(&self, key: &[u8]) -> Result<Option<StorageKey>, Self::Error> {
@@ -261,11 +252,10 @@ impl<'a, T: Backend<H>, H: Hasher> Backend<H> for &'a T {
fn next_child_storage_key(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<StorageKey>, Self::Error> {
(*self).next_child_storage_key(storage_key, child_info, key)
(*self).next_child_storage_key(child_info, key)
}
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
@@ -274,12 +264,11 @@ impl<'a, T: Backend<H>, H: Hasher> Backend<H> for &'a T {
fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
prefix: &[u8],
f: F,
) {
(*self).for_child_keys_with_prefix(storage_key, child_info, prefix, f)
(*self).for_child_keys_with_prefix(child_info, prefix, f)
}
fn storage_root<I>(&self, delta: I) -> (H::Out, Self::Transaction)
@@ -292,15 +281,14 @@ impl<'a, T: Backend<H>, H: Hasher> Backend<H> for &'a T {
fn child_storage_root<I>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
delta: I,
) -> (H::Out, bool, Self::Transaction)
where
I: IntoIterator<Item=(StorageKey, Option<StorageValue>)>,
H::Out: Ord,
{
(*self).child_storage_root(storage_key, child_info, delta)
(*self).child_storage_root(child_info, delta)
}
fn pairs(&self) -> Vec<(StorageKey, StorageValue)> {
@@ -331,7 +319,7 @@ impl Consolidate for () {
}
impl Consolidate for Vec<(
Option<(StorageKey, OwnedChildInfo)>,
Option<ChildInfo>,
StorageCollection,
)> {
fn consolidate(&mut self, mut other: Self) {
+44 -54
View File
@@ -21,11 +21,11 @@ use std::{
};
use crate::{Backend, InMemoryBackend, StorageKey, StorageValue};
use hash_db::Hasher;
use sp_trie::{TrieConfiguration, default_child_trie_root};
use sp_trie::{TrieConfiguration, empty_child_trie_root};
use sp_trie::trie_types::Layout;
use sp_core::{
storage::{
well_known_keys::is_child_storage_key, ChildStorageKey, Storage,
well_known_keys::is_child_storage_key, Storage,
ChildInfo, StorageChild,
},
traits::Externalities, Blake2Hasher,
@@ -83,7 +83,7 @@ impl BasicExternalities {
let mut ext = Self {
inner: Storage {
top: std::mem::replace(&mut storage.top, Default::default()),
children: std::mem::replace(&mut storage.children, Default::default()),
children_default: std::mem::replace(&mut storage.children_default, Default::default()),
},
extensions: Default::default(),
};
@@ -111,7 +111,7 @@ impl BasicExternalities {
impl PartialEq for BasicExternalities {
fn eq(&self, other: &BasicExternalities) -> bool {
self.inner.top.eq(&other.inner.top)
&& self.inner.children.eq(&other.inner.children)
&& self.inner.children_default.eq(&other.inner.children_default)
}
}
@@ -132,7 +132,7 @@ impl From<BTreeMap<StorageKey, StorageValue>> for BasicExternalities {
BasicExternalities {
inner: Storage {
top: hashmap,
children: Default::default(),
children_default: Default::default(),
},
extensions: Default::default(),
}
@@ -150,20 +150,19 @@ impl Externalities for BasicExternalities {
fn child_storage(
&self,
storage_key: ChildStorageKey,
_child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Option<StorageValue> {
self.inner.children.get(storage_key.as_ref()).and_then(|child| child.data.get(key)).cloned()
self.inner.children_default.get(child_info.storage_key())
.and_then(|child| child.data.get(key)).cloned()
}
fn child_storage_hash(
&self,
storage_key: ChildStorageKey,
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Option<Vec<u8>> {
self.child_storage(storage_key, child_info, key).map(|v| Blake2Hasher::hash(&v).encode())
self.child_storage(child_info, key).map(|v| Blake2Hasher::hash(&v).encode())
}
fn next_storage_key(&self, key: &[u8]) -> Option<StorageKey> {
@@ -173,12 +172,11 @@ impl Externalities for BasicExternalities {
fn next_child_storage_key(
&self,
storage_key: ChildStorageKey,
_child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Option<StorageKey> {
let range = (Bound::Excluded(key), Bound::Unbounded);
self.inner.children.get(storage_key.as_ref())
self.inner.children_default.get(child_info.storage_key())
.and_then(|child| child.data.range::<[u8], _>(range).next().map(|(k, _)| k).cloned())
}
@@ -196,12 +194,11 @@ impl Externalities for BasicExternalities {
fn place_child_storage(
&mut self,
storage_key: ChildStorageKey,
child_info: ChildInfo,
child_info: &ChildInfo,
key: StorageKey,
value: Option<StorageValue>,
) {
let child_map = self.inner.children.entry(storage_key.into_owned())
let child_map = self.inner.children_default.entry(child_info.storage_key().to_vec())
.or_insert_with(|| StorageChild {
data: Default::default(),
child_info: child_info.to_owned(),
@@ -215,10 +212,9 @@ impl Externalities for BasicExternalities {
fn kill_child_storage(
&mut self,
storage_key: ChildStorageKey,
_child_info: ChildInfo,
child_info: &ChildInfo,
) {
self.inner.children.remove(storage_key.as_ref());
self.inner.children_default.remove(child_info.storage_key());
}
fn clear_prefix(&mut self, prefix: &[u8]) {
@@ -243,11 +239,10 @@ impl Externalities for BasicExternalities {
fn clear_child_prefix(
&mut self,
storage_key: ChildStorageKey,
_child_info: ChildInfo,
child_info: &ChildInfo,
prefix: &[u8],
) {
if let Some(child) = self.inner.children.get_mut(storage_key.as_ref()) {
if let Some(child) = self.inner.children_default.get_mut(child_info.storage_key()) {
let to_remove = child.data.range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded))
.map(|(k, _)| k)
.take_while(|k| k.starts_with(prefix))
@@ -264,20 +259,19 @@ impl Externalities for BasicExternalities {
fn storage_root(&mut self) -> Vec<u8> {
let mut top = self.inner.top.clone();
let keys: Vec<_> = self.inner.children.keys().map(|k| k.to_vec()).collect();
let prefixed_keys: Vec<_> = self.inner.children_default.iter().map(|(_k, v)| {
(v.child_info.prefixed_storage_key(), v.child_info.clone())
}).collect();
// Single child trie implementation currently allows using the same child
// empty root for all child trie. Using null storage key until multiple
// type of child trie support.
let empty_hash = default_child_trie_root::<Layout<Blake2Hasher>>(&[]);
for storage_key in keys {
let child_root = self.child_storage_root(
ChildStorageKey::from_slice(storage_key.as_slice())
.expect("Map only feed by valid keys; qed"),
);
let empty_hash = empty_child_trie_root::<Layout<Blake2Hasher>>();
for (prefixed_storage_key, child_info) in prefixed_keys {
let child_root = self.child_storage_root(&child_info);
if &empty_hash[..] == &child_root[..] {
top.remove(storage_key.as_slice());
top.remove(prefixed_storage_key.as_slice());
} else {
top.insert(storage_key, child_root);
top.insert(prefixed_storage_key.into_inner(), child_root);
}
}
@@ -286,15 +280,15 @@ impl Externalities for BasicExternalities {
fn child_storage_root(
&mut self,
storage_key: ChildStorageKey,
child_info: &ChildInfo,
) -> Vec<u8> {
if let Some(child) = self.inner.children.get(storage_key.as_ref()) {
if let Some(child) = self.inner.children_default.get(child_info.storage_key()) {
let delta = child.data.clone().into_iter().map(|(k, v)| (k, Some(v)));
InMemoryBackend::<Blake2Hasher>::default()
.child_storage_root(storage_key.as_ref(), child.child_info.as_ref(), delta).0
.child_storage_root(&child.child_info, delta).0
} else {
default_child_trie_root::<Layout<Blake2Hasher>>(storage_key.as_ref())
empty_child_trie_root::<Layout<Blake2Hasher>>()
}.encode()
}
@@ -336,8 +330,6 @@ mod tests {
use sp_core::storage::well_known_keys::CODE;
use hex_literal::hex;
const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1");
#[test]
fn commit_should_work() {
let mut ext = BasicExternalities::default();
@@ -361,30 +353,28 @@ mod tests {
#[test]
fn children_works() {
let child_storage = b":child_storage:default:test".to_vec();
let child_info = ChildInfo::new_default(b"storage_key");
let child_info = &child_info;
let mut ext = BasicExternalities::new(Storage {
top: Default::default(),
children: map![
child_storage.clone() => StorageChild {
data: map![ b"doe".to_vec() => b"reindeer".to_vec() ],
child_info: CHILD_INFO_1.to_owned(),
children_default: map![
child_info.storage_key().to_vec() => StorageChild {
data: map![ b"doe".to_vec() => b"reindeer".to_vec() ],
child_info: child_info.to_owned(),
}
]
});
let child = || ChildStorageKey::from_vec(child_storage.clone()).unwrap();
assert_eq!(ext.child_storage(child_info, b"doe"), Some(b"reindeer".to_vec()));
assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"doe"), Some(b"reindeer".to_vec()));
ext.set_child_storage(child_info, b"dog".to_vec(), b"puppy".to_vec());
assert_eq!(ext.child_storage(child_info, b"dog"), Some(b"puppy".to_vec()));
ext.set_child_storage(child(), CHILD_INFO_1, b"dog".to_vec(), b"puppy".to_vec());
assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"dog"), Some(b"puppy".to_vec()));
ext.clear_child_storage(child_info, b"dog");
assert_eq!(ext.child_storage(child_info, b"dog"), None);
ext.clear_child_storage(child(), CHILD_INFO_1, b"dog");
assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"dog"), None);
ext.kill_child_storage(child(), CHILD_INFO_1);
assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"doe"), None);
ext.kill_child_storage(child_info);
assert_eq!(ext.child_storage(child_info, b"doe"), None);
}
#[test]
@@ -392,6 +382,6 @@ mod tests {
// Make sure no values are set by default in `BasicExternalities`.
let storage = BasicExternalities::new_empty().into_storages();
assert!(storage.top.is_empty());
assert!(storage.children.is_empty());
assert!(storage.children_default.is_empty());
}
}
@@ -32,6 +32,7 @@ use crate::{
input::{InputKey, InputPair, DigestIndex, ExtrinsicIndex, ChildIndex},
},
};
use sp_core::storage::{ChildInfo, ChildType, PrefixedStorageKey};
/// Prepare input pairs for building a changes trie of given block.
///
@@ -105,19 +106,19 @@ fn prepare_extrinsics_input<'a, B, H, Number>(
Number: BlockNumber,
{
let mut children_keys = BTreeSet::<StorageKey>::new();
let mut children_info = BTreeSet::<ChildInfo>::new();
let mut children_result = BTreeMap::new();
for (storage_key, _) in changes.prospective.children.iter()
.chain(changes.committed.children.iter()) {
children_keys.insert(storage_key.clone());
for (_storage_key, (_map, child_info)) in changes.prospective.children_default.iter()
.chain(changes.committed.children_default.iter()) {
children_info.insert(child_info.clone());
}
for storage_key in children_keys {
for child_info in children_info {
let child_index = ChildIndex::<Number> {
block: block.clone(),
storage_key: storage_key.clone(),
storage_key: child_info.prefixed_storage_key(),
};
let iter = prepare_extrinsics_input_inner(backend, block, changes, Some(storage_key))?;
let iter = prepare_extrinsics_input_inner(backend, block, changes, Some(child_info))?;
children_result.insert(child_index, iter);
}
@@ -130,22 +131,22 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>(
backend: &'a B,
block: &Number,
changes: &'a OverlayedChanges,
storage_key: Option<StorageKey>,
child_info: Option<ChildInfo>,
) -> Result<impl Iterator<Item=InputPair<Number>> + 'a, String>
where
B: Backend<H>,
H: Hasher,
Number: BlockNumber,
{
let (committed, prospective, child_info) = if let Some(sk) = storage_key.as_ref() {
let child_info = changes.child_info(sk).cloned();
(
changes.committed.children.get(sk).map(|c| &c.0),
changes.prospective.children.get(sk).map(|c| &c.0),
child_info,
)
let (committed, prospective) = if let Some(child_info) = child_info.as_ref() {
match child_info.child_type() {
ChildType::ParentKeyId => (
changes.committed.children_default.get(child_info.storage_key()).map(|c| &c.0),
changes.prospective.children_default.get(child_info.storage_key()).map(|c| &c.0),
),
}
} else {
(Some(&changes.committed.top), Some(&changes.prospective.top), None)
(Some(&changes.committed.top), Some(&changes.prospective.top))
};
committed.iter().flat_map(|c| c.iter())
.chain(prospective.iter().flat_map(|c| c.iter()))
@@ -155,13 +156,11 @@ fn prepare_extrinsics_input_inner<'a, B, H, Number>(
Entry::Vacant(entry) => {
// ignore temporary values (values that have null value at the end of operation
// AND are not in storage at the beginning of operation
if let Some(sk) = storage_key.as_ref() {
if !changes.child_storage(sk, k).map(|v| v.is_some()).unwrap_or_default() {
if let Some(child_info) = child_info.as_ref() {
if !backend.exists_child_storage(sk, child_info.as_ref(), k)
.map_err(|e| format!("{}", e))? {
return Ok(map);
}
if let Some(child_info) = child_info.as_ref() {
if !changes.child_storage(child_info, k).map(|v| v.is_some()).unwrap_or_default() {
if !backend.exists_child_storage(&child_info, k)
.map_err(|e| format!("{}", e))? {
return Ok(map);
}
}
} else {
@@ -281,7 +280,7 @@ fn prepare_digest_input<'a, H, Number>(
return Ok((map, child_map));
}
let mut children_roots = BTreeMap::<StorageKey, _>::new();
let mut children_roots = BTreeMap::<PrefixedStorageKey, _>::new();
{
let trie_storage = TrieBackendEssence::<_, H>::new(
crate::changes_trie::TrieBackendStorageAdapter(storage),
@@ -344,22 +343,20 @@ mod test {
use codec::Encode;
use sp_core::Blake2Hasher;
use sp_core::storage::well_known_keys::EXTRINSIC_INDEX;
use sp_core::storage::ChildInfo;
use crate::InMemoryBackend;
use crate::changes_trie::{RootsStorage, Configuration, storage::InMemoryStorage};
use crate::changes_trie::build_cache::{IncompleteCacheAction, IncompleteCachedBuildData};
use crate::overlayed_changes::{OverlayedValue, OverlayedChangeSet};
use super::*;
const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1");
const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2");
fn prepare_for_build(zero: u64) -> (
InMemoryBackend<Blake2Hasher>,
InMemoryStorage<Blake2Hasher, u64>,
OverlayedChanges,
Configuration,
) {
let child_info_1 = ChildInfo::new_default(b"storage_key1");
let child_info_2 = ChildInfo::new_default(b"storage_key2");
let backend: InMemoryBackend<_> = vec![
(vec![100], vec![255]),
(vec![101], vec![255]),
@@ -368,8 +365,9 @@ mod test {
(vec![104], vec![255]),
(vec![105], vec![255]),
].into_iter().collect::<std::collections::BTreeMap<_, _>>().into();
let child_trie_key1 = b"1".to_vec();
let child_trie_key2 = b"2".to_vec();
let prefixed_child_trie_key1 = child_info_1.prefixed_storage_key();
let child_trie_key1 = child_info_1.storage_key().to_vec();
let child_trie_key2 = child_info_2.storage_key().to_vec();
let storage = InMemoryStorage::with_inputs(vec![
(zero + 1, vec![
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![100] }, vec![1, 3]),
@@ -403,7 +401,7 @@ mod test {
]),
(zero + 9, Vec::new()), (zero + 10, Vec::new()), (zero + 11, Vec::new()), (zero + 12, Vec::new()),
(zero + 13, Vec::new()), (zero + 14, Vec::new()), (zero + 15, Vec::new()),
], vec![(child_trie_key1.clone(), vec![
], vec![(prefixed_child_trie_key1.clone(), vec![
(zero + 1, vec![
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![100] }, vec![1, 3]),
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![101] }, vec![0, 2]),
@@ -430,19 +428,19 @@ mod test {
extrinsics: Some(vec![0, 1].into_iter().collect())
}),
].into_iter().collect(),
children: vec![
children_default: vec![
(child_trie_key1.clone(), (vec![
(vec![100], OverlayedValue {
value: Some(vec![200]),
extrinsics: Some(vec![0, 2].into_iter().collect())
})
].into_iter().collect(), CHILD_INFO_1.to_owned())),
].into_iter().collect(), child_info_1.to_owned())),
(child_trie_key2, (vec![
(vec![100], OverlayedValue {
value: Some(vec![200]),
extrinsics: Some(vec![0, 2].into_iter().collect())
})
].into_iter().collect(), CHILD_INFO_2.to_owned())),
].into_iter().collect(), child_info_2.to_owned())),
].into_iter().collect()
},
committed: OverlayedChangeSet { top: vec![
@@ -459,13 +457,13 @@ mod test {
extrinsics: Some(vec![1].into_iter().collect())
}),
].into_iter().collect(),
children: vec![
children_default: vec![
(child_trie_key1, (vec![
(vec![100], OverlayedValue {
value: Some(vec![202]),
extrinsics: Some(vec![3].into_iter().collect())
})
].into_iter().collect(), CHILD_INFO_1.to_owned())),
].into_iter().collect(), child_info_1.to_owned())),
].into_iter().collect(),
},
collect_extrinsics: true,
@@ -487,6 +485,8 @@ mod test {
#[test]
fn build_changes_trie_nodes_on_non_digest_block() {
fn test_with_zero(zero: u64) {
let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key();
let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key();
let (backend, storage, changes, config) = prepare_for_build(zero);
let parent = AnchorBlockId { hash: Default::default(), number: zero + 4 };
let changes_trie_nodes = prepare_input(
@@ -503,11 +503,11 @@ mod test {
]);
assert_eq!(changes_trie_nodes.1.into_iter()
.map(|(k,v)| (k, v.collect::<Vec<_>>())).collect::<Vec<_>>(), vec![
(ChildIndex { block: zero + 5u64, storage_key: b"1".to_vec() },
(ChildIndex { block: zero + 5u64, storage_key: child_trie_key1 },
vec![
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5u64, key: vec![100] }, vec![0, 2, 3]),
]),
(ChildIndex { block: zero + 5, storage_key: b"2".to_vec() },
(ChildIndex { block: zero + 5, storage_key: child_trie_key2 },
vec![
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5, key: vec![100] }, vec![0, 2]),
]),
@@ -523,6 +523,8 @@ mod test {
#[test]
fn build_changes_trie_nodes_on_digest_block_l1() {
fn test_with_zero(zero: u64) {
let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key();
let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key();
let (backend, storage, changes, config) = prepare_for_build(zero);
let parent = AnchorBlockId { hash: Default::default(), number: zero + 3 };
let changes_trie_nodes = prepare_input(
@@ -544,7 +546,7 @@ mod test {
]);
assert_eq!(changes_trie_nodes.1.into_iter()
.map(|(k,v)| (k, v.collect::<Vec<_>>())).collect::<Vec<_>>(), vec![
(ChildIndex { block: zero + 4u64, storage_key: b"1".to_vec() },
(ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() },
vec![
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4u64, key: vec![100] }, vec![0, 2, 3]),
@@ -553,7 +555,7 @@ mod test {
InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]),
InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1]),
]),
(ChildIndex { block: zero + 4, storage_key: b"2".to_vec() },
(ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() },
vec![
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2]),
]),
@@ -568,6 +570,8 @@ mod test {
#[test]
fn build_changes_trie_nodes_on_digest_block_l2() {
fn test_with_zero(zero: u64) {
let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key();
let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key();
let (backend, storage, changes, config) = prepare_for_build(zero);
let parent = AnchorBlockId { hash: Default::default(), number: zero + 15 };
let changes_trie_nodes = prepare_input(
@@ -590,13 +594,13 @@ mod test {
]);
assert_eq!(changes_trie_nodes.1.into_iter()
.map(|(k,v)| (k, v.collect::<Vec<_>>())).collect::<Vec<_>>(), vec![
(ChildIndex { block: zero + 16u64, storage_key: b"1".to_vec() },
(ChildIndex { block: zero + 16u64, storage_key: child_trie_key1.clone() },
vec![
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16u64, key: vec![100] }, vec![0, 2, 3]),
InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![102] }, vec![zero + 4]),
]),
(ChildIndex { block: zero + 16, storage_key: b"2".to_vec() },
(ChildIndex { block: zero + 16, storage_key: child_trie_key2.clone() },
vec![
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16, key: vec![100] }, vec![0, 2]),
]),
@@ -657,6 +661,8 @@ mod test {
#[test]
fn build_changes_trie_nodes_ignores_temporary_storage_values() {
fn test_with_zero(zero: u64) {
let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key();
let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key();
let (backend, storage, mut changes, config) = prepare_for_build(zero);
// 110: missing from backend, set to None in overlay
@@ -685,7 +691,7 @@ mod test {
]);
assert_eq!(changes_trie_nodes.1.into_iter()
.map(|(k,v)| (k, v.collect::<Vec<_>>())).collect::<Vec<_>>(), vec![
(ChildIndex { block: zero + 4u64, storage_key: b"1".to_vec() },
(ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() },
vec![
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4u64, key: vec![100] }, vec![0, 2, 3]),
@@ -694,7 +700,7 @@ mod test {
InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]),
InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1]),
]),
(ChildIndex { block: zero + 4, storage_key: b"2".to_vec() },
(ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() },
vec![
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2]),
]),
@@ -709,6 +715,8 @@ mod test {
#[test]
fn cache_is_used_when_changes_trie_is_built() {
let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key();
let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key();
let (backend, mut storage, changes, config) = prepare_for_build(0);
let parent = AnchorBlockId { hash: Default::default(), number: 15 };
@@ -728,8 +736,8 @@ mod test {
let cached_data4 = IncompleteCacheAction::CacheBuildData(IncompleteCachedBuildData::new())
.set_digest_input_blocks(vec![1, 2, 3])
.insert(None, vec![vec![100], vec![102]].into_iter().collect())
.insert(Some(b"1".to_vec()), vec![vec![103], vec![104]].into_iter().collect())
.insert(Some(b"2".to_vec()), vec![vec![105], vec![106]].into_iter().collect())
.insert(Some(child_trie_key1.clone()), vec![vec![103], vec![104]].into_iter().collect())
.insert(Some(child_trie_key2.clone()), vec![vec![105], vec![106]].into_iter().collect())
.complete(4, &trie_root4);
storage.cache_mut().perform(cached_data4);
@@ -755,7 +763,10 @@ mod test {
.map(|(k, i)| (k, i.collect::<Vec<_>>()))
.collect::<BTreeMap<_, _>>();
assert_eq!(
child_changes_tries_nodes.get(&ChildIndex { block: 16u64, storage_key: b"1".to_vec() }).unwrap(),
child_changes_tries_nodes.get(&ChildIndex {
block: 16u64,
storage_key: child_trie_key1.clone(),
}).unwrap(),
&vec![
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16u64, key: vec![100] }, vec![0, 2, 3]),
@@ -764,7 +775,7 @@ mod test {
],
);
assert_eq!(
child_changes_tries_nodes.get(&ChildIndex { block: 16u64, storage_key: b"2".to_vec() }).unwrap(),
child_changes_tries_nodes.get(&ChildIndex { block: 16u64, storage_key: child_trie_key2.clone() }).unwrap(),
&vec![
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16u64, key: vec![100] }, vec![0, 2]),
@@ -19,6 +19,7 @@
use std::collections::{HashMap, HashSet};
use crate::StorageKey;
use sp_core::storage::PrefixedStorageKey;
/// Changes trie build cache.
///
@@ -38,7 +39,7 @@ pub struct BuildCache<H, N> {
/// The `Option<Vec<u8>>` in inner `HashMap` stands for the child storage key.
/// If it is `None`, then the `HashSet` contains keys changed in top-level storage.
/// If it is `Some`, then the `HashSet` contains keys changed in child storage, identified by the key.
changed_keys: HashMap<H, HashMap<Option<StorageKey>, HashSet<StorageKey>>>,
changed_keys: HashMap<H, HashMap<Option<PrefixedStorageKey>, HashSet<StorageKey>>>,
}
/// The action to perform when block-with-changes-trie is imported.
@@ -56,7 +57,7 @@ pub struct CachedBuildData<H, N> {
block: N,
trie_root: H,
digest_input_blocks: Vec<N>,
changed_keys: HashMap<Option<StorageKey>, HashSet<StorageKey>>,
changed_keys: HashMap<Option<PrefixedStorageKey>, HashSet<StorageKey>>,
}
/// The action to perform when block-with-changes-trie is imported.
@@ -72,7 +73,7 @@ pub(crate) enum IncompleteCacheAction<N> {
#[derive(Debug, PartialEq)]
pub(crate) struct IncompleteCachedBuildData<N> {
digest_input_blocks: Vec<N>,
changed_keys: HashMap<Option<StorageKey>, HashSet<StorageKey>>,
changed_keys: HashMap<Option<PrefixedStorageKey>, HashSet<StorageKey>>,
}
impl<H, N> BuildCache<H, N>
@@ -89,7 +90,7 @@ impl<H, N> BuildCache<H, N>
}
/// Get cached changed keys for changes trie with given root.
pub fn get(&self, root: &H) -> Option<&HashMap<Option<StorageKey>, HashSet<StorageKey>>> {
pub fn get(&self, root: &H) -> Option<&HashMap<Option<PrefixedStorageKey>, HashSet<StorageKey>>> {
self.changed_keys.get(&root)
}
@@ -98,7 +99,7 @@ impl<H, N> BuildCache<H, N>
pub fn with_changed_keys(
&self,
root: &H,
functor: &mut dyn FnMut(&HashMap<Option<StorageKey>, HashSet<StorageKey>>),
functor: &mut dyn FnMut(&HashMap<Option<PrefixedStorageKey>, HashSet<StorageKey>>),
) -> bool {
match self.changed_keys.get(&root) {
Some(changed_keys) => {
@@ -164,7 +165,7 @@ impl<N> IncompleteCacheAction<N> {
/// Insert changed keys of given storage into cached data.
pub(crate) fn insert(
self,
storage_key: Option<StorageKey>,
storage_key: Option<PrefixedStorageKey>,
changed_keys: HashSet<StorageKey>,
) -> Self {
match self {
@@ -200,7 +201,7 @@ impl<N> IncompleteCachedBuildData<N> {
fn insert(
mut self,
storage_key: Option<StorageKey>,
storage_key: Option<PrefixedStorageKey>,
changed_keys: HashSet<StorageKey>,
) -> Self {
self.changed_keys.insert(storage_key, changed_keys);
@@ -22,6 +22,7 @@ use std::collections::VecDeque;
use codec::{Decode, Encode, Codec};
use hash_db::Hasher;
use num_traits::Zero;
use sp_core::storage::PrefixedStorageKey;
use sp_trie::Recorder;
use crate::changes_trie::{AnchorBlockId, ConfigurationRange, RootsStorage, Storage, BlockNumber};
use crate::changes_trie::input::{DigestIndex, ExtrinsicIndex, DigestIndexValue, ExtrinsicIndexValue};
@@ -40,7 +41,7 @@ pub fn key_changes<'a, H: Hasher, Number: BlockNumber>(
begin: Number,
end: &'a AnchorBlockId<H::Out, Number>,
max: Number,
storage_key: Option<&'a [u8]>,
storage_key: Option<&'a PrefixedStorageKey>,
key: &'a [u8],
) -> Result<DrilldownIterator<'a, H, Number>, String> {
// we can't query any roots before root
@@ -79,7 +80,7 @@ pub fn key_changes_proof<'a, H: Hasher, Number: BlockNumber>(
begin: Number,
end: &AnchorBlockId<H::Out, Number>,
max: Number,
storage_key: Option<&[u8]>,
storage_key: Option<&PrefixedStorageKey>,
key: &[u8],
) -> Result<Vec<Vec<u8>>, String> where H::Out: Codec {
// we can't query any roots before root
@@ -127,7 +128,7 @@ pub fn key_changes_proof_check<'a, H: Hasher, Number: BlockNumber>(
begin: Number,
end: &AnchorBlockId<H::Out, Number>,
max: Number,
storage_key: Option<&[u8]>,
storage_key: Option<&PrefixedStorageKey>,
key: &[u8]
) -> Result<Vec<(Number, u32)>, String> where H::Out: Encode {
key_changes_proof_check_with_db(
@@ -150,7 +151,7 @@ pub fn key_changes_proof_check_with_db<'a, H: Hasher, Number: BlockNumber>(
begin: Number,
end: &AnchorBlockId<H::Out, Number>,
max: Number,
storage_key: Option<&[u8]>,
storage_key: Option<&PrefixedStorageKey>,
key: &[u8]
) -> Result<Vec<(Number, u32)>, String> where H::Out: Encode {
// we can't query any roots before root
@@ -188,7 +189,7 @@ pub struct DrilldownIteratorEssence<'a, H, Number>
Number: BlockNumber,
H::Out: 'a,
{
storage_key: Option<&'a [u8]>,
storage_key: Option<&'a PrefixedStorageKey>,
key: &'a [u8],
roots_storage: &'a dyn RootsStorage<H, Number>,
storage: &'a dyn Storage<H, Number>,
@@ -238,7 +239,7 @@ impl<'a, H, Number> DrilldownIteratorEssence<'a, H, Number>
let trie_root = if let Some(storage_key) = self.storage_key {
let child_key = ChildIndex {
block: block.clone(),
storage_key: storage_key.to_vec(),
storage_key: storage_key.clone(),
}.encode();
if let Some(trie_root) = trie_reader(self.storage, trie_root, &child_key)?
.and_then(|v| <Vec<u8>>::decode(&mut &v[..]).ok())
@@ -382,6 +383,11 @@ mod tests {
use sp_runtime::traits::BlakeTwo256;
use super::*;
fn child_key() -> PrefixedStorageKey {
let child_info = sp_core::storage::ChildInfo::new_default(&b"1"[..]);
child_info.prefixed_storage_key()
}
fn prepare_for_drilldown() -> (Configuration, InMemoryStorage<BlakeTwo256, u64>) {
let config = Configuration { digest_interval: 4, digest_levels: 2 };
let backend = InMemoryStorage::with_inputs(vec![
@@ -418,7 +424,7 @@ mod tests {
(16, vec![
InputPair::DigestIndex(DigestIndex { block: 16, key: vec![42] }, vec![4, 8]),
]),
], vec![(b"1".to_vec(), vec![
], vec![(child_key(), vec![
(1, vec![
InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 1, key: vec![42] }, vec![0]),
]),
@@ -535,7 +541,7 @@ mod tests {
1,
&AnchorBlockId { hash: Default::default(), number: 100 },
1000,
Some(&b"1"[..]),
Some(&child_key()),
&[42],
).and_then(|i| i.collect::<Result<Vec<_>, _>>()).is_err());
}
@@ -577,7 +583,7 @@ mod tests {
let (remote_config, remote_storage) = prepare_for_drilldown();
let remote_proof_child = key_changes_proof::<BlakeTwo256, u64>(
configuration_range(&remote_config, 0), &remote_storage, 1,
&AnchorBlockId { hash: Default::default(), number: 16 }, 16, Some(&b"1"[..]), &[42]).unwrap();
&AnchorBlockId { hash: Default::default(), number: 16 }, 16, Some(&child_key()), &[42]).unwrap();
// happens on local light node:
@@ -592,7 +598,7 @@ mod tests {
local_storage.clear_storage();
let local_result_child = key_changes_proof_check::<BlakeTwo256, u64>(
configuration_range(&local_config, 0), &local_storage, remote_proof_child, 1,
&AnchorBlockId { hash: Default::default(), number: 16 }, 16, Some(&b"1"[..]), &[42]);
&AnchorBlockId { hash: Default::default(), number: 16 }, 16, Some(&child_key()), &[42]);
// check that drilldown result is the same as if it was happening at the full node
assert_eq!(local_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)]));
@@ -21,6 +21,7 @@ use crate::{
StorageKey, StorageValue,
changes_trie::BlockNumber
};
use sp_core::storage::PrefixedStorageKey;
/// Key of { changed key => set of extrinsic indices } mapping.
#[derive(Clone, Debug, PartialEq, Eq)]
@@ -49,7 +50,7 @@ pub struct ChildIndex<Number: BlockNumber> {
/// Block at which this key has been inserted in the trie.
pub block: Number,
/// Storage key this node is responsible for.
pub storage_key: StorageKey,
pub storage_key: PrefixedStorageKey,
}
/// Value of { changed key => block/digest block numbers } mapping.
@@ -178,7 +179,7 @@ impl<Number: BlockNumber> Decode for InputKey<Number> {
})),
3 => Ok(InputKey::ChildIndex(ChildIndex {
block: Decode::decode(input)?,
storage_key: Decode::decode(input)?,
storage_key: PrefixedStorageKey::new(Decode::decode(input)?),
})),
_ => Err("Invalid input key variant".into()),
}
@@ -71,6 +71,7 @@ use hash_db::{Hasher, Prefix};
use num_traits::{One, Zero};
use codec::{Decode, Encode};
use sp_core;
use sp_core::storage::PrefixedStorageKey;
use sp_trie::{MemoryDB, DBValue, TrieMut};
use sp_trie::trie_types::TrieDBMut;
use crate::{
@@ -156,7 +157,7 @@ pub trait Storage<H: Hasher, Number: BlockNumber>: RootsStorage<H, Number> {
fn with_cached_changed_keys(
&self,
root: &H::Out,
functor: &mut dyn FnMut(&HashMap<Option<StorageKey>, HashSet<StorageKey>>),
functor: &mut dyn FnMut(&HashMap<Option<PrefixedStorageKey>, HashSet<StorageKey>>),
) -> bool;
/// Get a trie node.
fn get(&self, key: &H::Out, prefix: Prefix) -> Result<Option<DBValue>, String>;
@@ -137,7 +137,8 @@ mod tests {
#[test]
fn prune_works() {
fn prepare_storage() -> InMemoryStorage<BlakeTwo256, u64> {
let child_key = ChildIndex { block: 67u64, storage_key: b"1".to_vec() }.encode();
let child_info = sp_core::storage::ChildInfo::new_default(&b"1"[..]);
let child_key = ChildIndex { block: 67u64, storage_key: child_info.prefixed_storage_key() }.encode();
let mut mdb1 = MemoryDB::<BlakeTwo256>::default();
let root1 = insert_into_memory_db::<BlakeTwo256, _>(
&mut mdb1, vec![(vec![10], vec![20])]).unwrap();
@@ -18,6 +18,7 @@
use std::collections::{BTreeMap, HashSet, HashMap};
use hash_db::{Hasher, Prefix, EMPTY_PREFIX};
use sp_core::storage::PrefixedStorageKey;
use sp_trie::DBValue;
use sp_trie::MemoryDB;
use parking_lot::RwLock;
@@ -96,7 +97,7 @@ impl<H: Hasher, Number: BlockNumber> InMemoryStorage<H, Number> {
#[cfg(test)]
pub fn with_inputs(
mut top_inputs: Vec<(Number, Vec<InputPair<Number>>)>,
children_inputs: Vec<(StorageKey, Vec<(Number, Vec<InputPair<Number>>)>)>,
children_inputs: Vec<(PrefixedStorageKey, Vec<(Number, Vec<InputPair<Number>>)>)>,
) -> Self {
let mut mdb = MemoryDB::default();
let mut roots = BTreeMap::new();
@@ -182,7 +183,7 @@ impl<H: Hasher, Number: BlockNumber> Storage<H, Number> for InMemoryStorage<H, N
fn with_cached_changed_keys(
&self,
root: &H::Out,
functor: &mut dyn FnMut(&HashMap<Option<StorageKey>, HashSet<StorageKey>>),
functor: &mut dyn FnMut(&HashMap<Option<PrefixedStorageKey>, HashSet<StorageKey>>),
) -> bool {
self.cache.with_changed_keys(root, functor)
}
+71 -86
View File
@@ -24,10 +24,10 @@ use crate::{
use hash_db::Hasher;
use sp_core::{
storage::{ChildStorageKey, well_known_keys::is_child_storage_key, ChildInfo},
storage::{well_known_keys::is_child_storage_key, ChildInfo},
traits::Externalities, hexdisplay::HexDisplay,
};
use sp_trie::{trie_types::Layout, default_child_trie_root};
use sp_trie::{trie_types::Layout, empty_child_trie_root};
use sp_externalities::{Extensions, Extension};
use codec::{Decode, Encode};
@@ -181,22 +181,21 @@ where
fn child_storage(
&self,
storage_key: ChildStorageKey,
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Option<StorageValue> {
let _guard = sp_panic_handler::AbortGuard::force_abort();
let result = self.overlay
.child_storage(storage_key.as_ref(), key)
.child_storage(child_info, key)
.map(|x| x.map(|x| x.to_vec()))
.unwrap_or_else(||
self.backend.child_storage(storage_key.as_ref(), child_info, key)
self.backend.child_storage(child_info, key)
.expect(EXT_NOT_ALLOWED_TO_FAIL)
);
trace!(target: "state-trace", "{:04x}: GetChild({}) {}={:?}",
self.id,
HexDisplay::from(&storage_key.as_ref()),
HexDisplay::from(&child_info.storage_key()),
HexDisplay::from(&key),
result.as_ref().map(HexDisplay::from)
);
@@ -206,22 +205,21 @@ where
fn child_storage_hash(
&self,
storage_key: ChildStorageKey,
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Option<Vec<u8>> {
let _guard = sp_panic_handler::AbortGuard::force_abort();
let result = self.overlay
.child_storage(storage_key.as_ref(), key)
.child_storage(child_info, key)
.map(|x| x.map(|x| H::hash(x)))
.unwrap_or_else(||
self.backend.child_storage_hash(storage_key.as_ref(), child_info, key)
self.backend.child_storage_hash(child_info, key)
.expect(EXT_NOT_ALLOWED_TO_FAIL)
);
trace!(target: "state-trace", "{:04x}: ChildHash({}) {}={:?}",
self.id,
HexDisplay::from(&storage_key.as_ref()),
HexDisplay::from(&child_info.storage_key()),
HexDisplay::from(&key),
result,
);
@@ -247,22 +245,21 @@ where
fn exists_child_storage(
&self,
storage_key: ChildStorageKey,
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> bool {
let _guard = sp_panic_handler::AbortGuard::force_abort();
let result = match self.overlay.child_storage(storage_key.as_ref(), key) {
let result = match self.overlay.child_storage(child_info, key) {
Some(x) => x.is_some(),
_ => self.backend
.exists_child_storage(storage_key.as_ref(), child_info, key)
.exists_child_storage(child_info, key)
.expect(EXT_NOT_ALLOWED_TO_FAIL),
};
trace!(target: "state-trace", "{:04x}: ChildExists({}) {}={:?}",
self.id,
HexDisplay::from(&storage_key.as_ref()),
HexDisplay::from(&child_info.storage_key()),
HexDisplay::from(&key),
result,
);
@@ -286,15 +283,14 @@ where
fn next_child_storage_key(
&self,
storage_key: ChildStorageKey,
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Option<StorageKey> {
let next_backend_key = self.backend
.next_child_storage_key(storage_key.as_ref(), child_info, key)
.next_child_storage_key(child_info, key)
.expect(EXT_NOT_ALLOWED_TO_FAIL);
let next_overlay_key_change = self.overlay.next_child_storage_key_change(
storage_key.as_ref(),
child_info.storage_key(),
key
);
@@ -305,7 +301,6 @@ where
Some(overlay_key.0.to_vec())
} else {
self.next_child_storage_key(
storage_key,
child_info,
&overlay_key.0[..],
)
@@ -331,38 +326,36 @@ where
fn place_child_storage(
&mut self,
storage_key: ChildStorageKey,
child_info: ChildInfo,
child_info: &ChildInfo,
key: StorageKey,
value: Option<StorageValue>,
) {
trace!(target: "state-trace", "{:04x}: PutChild({}) {}={:?}",
self.id,
HexDisplay::from(&storage_key.as_ref()),
HexDisplay::from(&child_info.storage_key()),
HexDisplay::from(&key),
value.as_ref().map(HexDisplay::from)
);
let _guard = sp_panic_handler::AbortGuard::force_abort();
self.mark_dirty();
self.overlay.set_child_storage(storage_key.into_owned(), child_info, key, value);
self.overlay.set_child_storage(child_info, key, value);
}
fn kill_child_storage(
&mut self,
storage_key: ChildStorageKey,
child_info: ChildInfo,
child_info: &ChildInfo,
) {
trace!(target: "state-trace", "{:04x}: KillChild({})",
self.id,
HexDisplay::from(&storage_key.as_ref()),
HexDisplay::from(&child_info.storage_key()),
);
let _guard = sp_panic_handler::AbortGuard::force_abort();
self.mark_dirty();
self.overlay.clear_child_storage(storage_key.as_ref(), child_info);
self.backend.for_keys_in_child_storage(storage_key.as_ref(), child_info, |key| {
self.overlay.set_child_storage(storage_key.as_ref().to_vec(), child_info, key.to_vec(), None);
self.overlay.clear_child_storage(child_info);
self.backend.for_keys_in_child_storage(child_info, |key| {
self.overlay.set_child_storage(child_info, key.to_vec(), None);
});
}
@@ -386,21 +379,20 @@ where
fn clear_child_prefix(
&mut self,
storage_key: ChildStorageKey,
child_info: ChildInfo,
child_info: &ChildInfo,
prefix: &[u8],
) {
trace!(target: "state-trace", "{:04x}: ClearChildPrefix({}) {}",
self.id,
HexDisplay::from(&storage_key.as_ref()),
HexDisplay::from(&child_info.storage_key()),
HexDisplay::from(&prefix),
);
let _guard = sp_panic_handler::AbortGuard::force_abort();
self.mark_dirty();
self.overlay.clear_child_prefix(storage_key.as_ref(), child_info, prefix);
self.backend.for_child_keys_with_prefix(storage_key.as_ref(), child_info, prefix, |key| {
self.overlay.set_child_storage(storage_key.as_ref().to_vec(), child_info, key.to_vec(), None);
self.overlay.clear_child_prefix(child_info, prefix);
self.backend.for_child_keys_with_prefix(child_info, prefix, |key| {
self.overlay.set_child_storage(child_info, key.to_vec(), None);
});
}
@@ -425,37 +417,38 @@ where
fn child_storage_root(
&mut self,
storage_key: ChildStorageKey,
child_info: &ChildInfo,
) -> Vec<u8> {
let _guard = sp_panic_handler::AbortGuard::force_abort();
let storage_key = child_info.storage_key();
let prefixed_storage_key = child_info.prefixed_storage_key();
if self.storage_transaction_cache.transaction_storage_root.is_some() {
let root = self
.storage(storage_key.as_ref())
.storage(prefixed_storage_key.as_slice())
.and_then(|k| Decode::decode(&mut &k[..]).ok())
.unwrap_or(
default_child_trie_root::<Layout<H>>(storage_key.as_ref())
empty_child_trie_root::<Layout<H>>()
);
trace!(target: "state-trace", "{:04x}: ChildRoot({}) (cached) {}",
self.id,
HexDisplay::from(&storage_key.as_ref()),
HexDisplay::from(&storage_key),
HexDisplay::from(&root.as_ref()),
);
root.encode()
} else {
let storage_key = storage_key.as_ref();
if let Some(child_info) = self.overlay.child_info(storage_key).cloned() {
if let Some(child_info) = self.overlay.default_child_info(storage_key).cloned() {
let (root, is_empty, _) = {
let delta = self.overlay.committed.children.get(storage_key)
let delta = self.overlay.committed.children_default.get(storage_key)
.into_iter()
.flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value)))
.chain(
self.overlay.prospective.children.get(storage_key)
self.overlay.prospective.children_default.get(storage_key)
.into_iter()
.flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value)))
);
self.backend.child_storage_root(storage_key, child_info.as_ref(), delta)
self.backend.child_storage_root(&child_info, delta)
};
let root = root.encode();
@@ -465,9 +458,9 @@ where
// A better design would be to manage 'child_storage_transaction' in a
// similar way as 'storage_transaction' but for each child trie.
if is_empty {
self.overlay.set_storage(storage_key.into(), None);
self.overlay.set_storage(prefixed_storage_key.into_inner(), None);
} else {
self.overlay.set_storage(storage_key.into(), Some(root.clone()));
self.overlay.set_storage(prefixed_storage_key.into_inner(), Some(root.clone()));
}
trace!(target: "state-trace", "{:04x}: ChildRoot({}) {}",
@@ -479,10 +472,10 @@ where
} else {
// empty overlay
let root = self
.storage(storage_key.as_ref())
.storage(prefixed_storage_key.as_slice())
.and_then(|k| Decode::decode(&mut &k[..]).ok())
.unwrap_or(
default_child_trie_root::<Layout<H>>(storage_key.as_ref())
empty_child_trie_root::<Layout<H>>()
);
trace!(target: "state-trace", "{:04x}: ChildRoot({}) (no change) {}",
self.id,
@@ -591,11 +584,6 @@ mod tests {
type TestBackend = InMemoryBackend<Blake2Hasher>;
type TestExt<'a> = Ext<'a, Blake2Hasher, u64, TestBackend>;
const CHILD_KEY_1: &[u8] = b":child_storage:default:Child1";
const CHILD_UUID_1: &[u8] = b"unique_id_1";
const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1);
fn prepare_overlay_with_changes() -> OverlayedChanges {
OverlayedChanges {
prospective: vec![
@@ -680,7 +668,7 @@ mod tests {
vec![20] => vec![20],
vec![40] => vec![40]
],
children: map![]
children_default: map![]
}.into();
let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None);
@@ -707,26 +695,23 @@ mod tests {
#[test]
fn next_child_storage_key_works() {
const CHILD_KEY_1: &[u8] = b":child_storage:default:Child1";
const CHILD_UUID_1: &[u8] = b"unique_id_1";
const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1);
let child_info = ChildInfo::new_default(b"Child1");
let child_info = &child_info;
let mut cache = StorageTransactionCache::default();
let child = || ChildStorageKey::from_slice(CHILD_KEY_1).unwrap();
let mut overlay = OverlayedChanges::default();
overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![20], None);
overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![30], Some(vec![31]));
overlay.set_child_storage(child_info, vec![20], None);
overlay.set_child_storage(child_info, vec![30], Some(vec![31]));
let backend = Storage {
top: map![],
children: map![
child().as_ref().to_vec() => StorageChild {
children_default: map![
child_info.storage_key().to_vec() => StorageChild {
data: map![
vec![10] => vec![10],
vec![20] => vec![20],
vec![40] => vec![40]
],
child_info: CHILD_INFO_1.to_owned(),
child_info: child_info.to_owned(),
}
],
}.into();
@@ -735,65 +720,65 @@ mod tests {
let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None);
// next_backend < next_overlay
assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[5]), Some(vec![10]));
assert_eq!(ext.next_child_storage_key(child_info, &[5]), Some(vec![10]));
// next_backend == next_overlay but next_overlay is a delete
assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[10]), Some(vec![30]));
assert_eq!(ext.next_child_storage_key(child_info, &[10]), Some(vec![30]));
// next_overlay < next_backend
assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[20]), Some(vec![30]));
assert_eq!(ext.next_child_storage_key(child_info, &[20]), Some(vec![30]));
// next_backend exist but next_overlay doesn't exist
assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[30]), Some(vec![40]));
assert_eq!(ext.next_child_storage_key(child_info, &[30]), Some(vec![40]));
drop(ext);
overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![50], Some(vec![50]));
overlay.set_child_storage(child_info, vec![50], Some(vec![50]));
let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None);
// next_overlay exist but next_backend doesn't exist
assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[40]), Some(vec![50]));
assert_eq!(ext.next_child_storage_key(child_info, &[40]), Some(vec![50]));
}
#[test]
fn child_storage_works() {
let child_info = ChildInfo::new_default(b"Child1");
let child_info = &child_info;
let mut cache = StorageTransactionCache::default();
let child = || ChildStorageKey::from_slice(CHILD_KEY_1).unwrap();
let mut overlay = OverlayedChanges::default();
overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![20], None);
overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![30], Some(vec![31]));
overlay.set_child_storage(child_info, vec![20], None);
overlay.set_child_storage(child_info, vec![30], Some(vec![31]));
let backend = Storage {
top: map![],
children: map![
child().as_ref().to_vec() => StorageChild {
children_default: map![
child_info.storage_key().to_vec() => StorageChild {
data: map![
vec![10] => vec![10],
vec![20] => vec![20],
vec![30] => vec![40]
],
child_info: CHILD_INFO_1.to_owned(),
child_info: child_info.to_owned(),
}
],
}.into();
let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None);
assert_eq!(ext.child_storage(child(), CHILD_INFO_1, &[10]), Some(vec![10]));
assert_eq!(ext.child_storage(child_info, &[10]), Some(vec![10]));
assert_eq!(
ext.child_storage_hash(child(), CHILD_INFO_1, &[10]),
ext.child_storage_hash(child_info, &[10]),
Some(Blake2Hasher::hash(&[10]).as_ref().to_vec()),
);
assert_eq!(ext.child_storage(child(), CHILD_INFO_1, &[20]), None);
assert_eq!(ext.child_storage(child_info, &[20]), None);
assert_eq!(
ext.child_storage_hash(child(), CHILD_INFO_1, &[20]),
ext.child_storage_hash(child_info, &[20]),
None,
);
assert_eq!(ext.child_storage(child(), CHILD_INFO_1, &[30]), Some(vec![31]));
assert_eq!(ext.child_storage(child_info, &[30]), Some(vec![31]));
assert_eq!(
ext.child_storage_hash(child(), CHILD_INFO_1, &[30]),
ext.child_storage_hash(child_info, &[30]),
Some(Blake2Hasher::hash(&[31]).as_ref().to_vec()),
);
}
}
@@ -25,10 +25,10 @@ use crate::{
use std::{error, fmt, collections::{BTreeMap, HashMap}, marker::PhantomData, ops};
use hash_db::Hasher;
use sp_trie::{
MemoryDB, child_trie_root, default_child_trie_root, TrieConfiguration, trie_types::Layout,
MemoryDB, child_trie_root, empty_child_trie_root, TrieConfiguration, trie_types::Layout,
};
use codec::Codec;
use sp_core::storage::{ChildInfo, OwnedChildInfo, Storage};
use sp_core::storage::{ChildInfo, ChildType, Storage};
/// Error impossible.
// FIXME: use `!` type when stabilized. https://github.com/rust-lang/rust/issues/35121
@@ -48,7 +48,7 @@ impl error::Error for Void {
/// In-memory backend. Fully recomputes tries each time `as_trie_backend` is called but useful for
/// tests and proof checking.
pub struct InMemory<H: Hasher> {
inner: HashMap<Option<(StorageKey, OwnedChildInfo)>, BTreeMap<StorageKey, StorageValue>>,
inner: HashMap<Option<ChildInfo>, BTreeMap<StorageKey, StorageValue>>,
// This field is only needed for returning reference in `as_trie_backend`.
trie: Option<TrieBackend<MemoryDB<H>, H>>,
_hasher: PhantomData<H>,
@@ -89,7 +89,7 @@ impl<H: Hasher> PartialEq for InMemory<H> {
impl<H: Hasher> InMemory<H> {
/// Copy the state, with applied updates
pub fn update<
T: IntoIterator<Item = (Option<(StorageKey, OwnedChildInfo)>, StorageCollection)>
T: IntoIterator<Item = (Option<ChildInfo>, StorageCollection)>
>(
&self,
changes: T,
@@ -108,10 +108,10 @@ impl<H: Hasher> InMemory<H> {
}
}
impl<H: Hasher> From<HashMap<Option<(StorageKey, OwnedChildInfo)>, BTreeMap<StorageKey, StorageValue>>>
impl<H: Hasher> From<HashMap<Option<ChildInfo>, BTreeMap<StorageKey, StorageValue>>>
for InMemory<H>
{
fn from(inner: HashMap<Option<(StorageKey, OwnedChildInfo)>, BTreeMap<StorageKey, StorageValue>>) -> Self {
fn from(inner: HashMap<Option<ChildInfo>, BTreeMap<StorageKey, StorageValue>>) -> Self {
InMemory {
inner,
trie: None,
@@ -122,8 +122,8 @@ impl<H: Hasher> From<HashMap<Option<(StorageKey, OwnedChildInfo)>, BTreeMap<Stor
impl<H: Hasher> From<Storage> for InMemory<H> {
fn from(inners: Storage) -> Self {
let mut inner: HashMap<Option<(StorageKey, OwnedChildInfo)>, BTreeMap<StorageKey, StorageValue>>
= inners.children.into_iter().map(|(k, c)| (Some((k, c.child_info)), c.data)).collect();
let mut inner: HashMap<Option<ChildInfo>, BTreeMap<StorageKey, StorageValue>>
= inners.children_default.into_iter().map(|(_k, c)| (Some(c.child_info), c.data)).collect();
inner.insert(None, inners.top);
InMemory {
inner,
@@ -145,12 +145,12 @@ impl<H: Hasher> From<BTreeMap<StorageKey, StorageValue>> for InMemory<H> {
}
}
impl<H: Hasher> From<Vec<(Option<(StorageKey, OwnedChildInfo)>, StorageCollection)>>
impl<H: Hasher> From<Vec<(Option<ChildInfo>, StorageCollection)>>
for InMemory<H> {
fn from(
inner: Vec<(Option<(StorageKey, OwnedChildInfo)>, StorageCollection)>,
inner: Vec<(Option<ChildInfo>, StorageCollection)>,
) -> Self {
let mut expanded: HashMap<Option<(StorageKey, OwnedChildInfo)>, BTreeMap<StorageKey, StorageValue>>
let mut expanded: HashMap<Option<ChildInfo>, BTreeMap<StorageKey, StorageValue>>
= HashMap::new();
for (child_info, key_values) in inner {
let entry = expanded.entry(child_info).or_default();
@@ -165,18 +165,16 @@ impl<H: Hasher> From<Vec<(Option<(StorageKey, OwnedChildInfo)>, StorageCollectio
}
impl<H: Hasher> InMemory<H> {
/// child storage key iterator
pub fn child_storage_keys(&self) -> impl Iterator<Item=(&[u8], ChildInfo)> {
self.inner.iter().filter_map(|item|
item.0.as_ref().map(|v|(&v.0[..], v.1.as_ref()))
)
/// Child storage infos iterator.
pub fn child_storage_infos(&self) -> impl Iterator<Item = &ChildInfo> {
self.inner.iter().filter_map(|item| item.0.as_ref())
}
}
impl<H: Hasher> Backend<H> for InMemory<H> where H::Out: Codec {
type Error = Void;
type Transaction = Vec<(
Option<(StorageKey, OwnedChildInfo)>,
Option<ChildInfo>,
StorageCollection,
)>;
type TrieBackendStorage = MemoryDB<H>;
@@ -187,11 +185,10 @@ impl<H: Hasher> Backend<H> for InMemory<H> where H::Out: Codec {
fn child_storage(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<StorageValue>, Self::Error> {
Ok(self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned())))
Ok(self.inner.get(&Some(child_info.to_owned()))
.and_then(|map| map.get(key).map(Clone::clone)))
}
@@ -211,22 +208,20 @@ impl<H: Hasher> Backend<H> for InMemory<H> where H::Out: Codec {
fn for_keys_in_child_storage<F: FnMut(&[u8])>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
mut f: F,
) {
self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned())))
self.inner.get(&Some(child_info.to_owned()))
.map(|map| map.keys().for_each(|k| f(&k)));
}
fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
prefix: &[u8],
f: F,
) {
self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned())))
self.inner.get(&Some(child_info.to_owned()))
.map(|map| map.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f));
}
@@ -253,16 +248,15 @@ impl<H: Hasher> Backend<H> for InMemory<H> where H::Out: Codec {
fn child_storage_root<I>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
delta: I,
) -> (H::Out, bool, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord
{
let storage_key = storage_key.to_vec();
let child_info = Some((storage_key.clone(), child_info.to_owned()));
let child_type = child_info.child_type();
let child_info = Some(child_info.to_owned());
let existing_pairs = self.inner.get(&child_info)
.into_iter()
@@ -270,7 +264,6 @@ impl<H: Hasher> Backend<H> for InMemory<H> where H::Out: Codec {
let transaction: Vec<_> = delta.into_iter().collect();
let root = child_trie_root::<Layout<H>, _, _, _>(
&storage_key,
existing_pairs.chain(transaction.iter().cloned())
.collect::<HashMap<_, _>>()
.into_iter()
@@ -279,7 +272,9 @@ impl<H: Hasher> Backend<H> for InMemory<H> where H::Out: Codec {
let full_transaction = transaction.into_iter().collect();
let is_default = root == default_child_trie_root::<Layout<H>>(&storage_key);
let is_default = match child_type {
ChildType::ParentKeyId => root == empty_child_trie_root::<Layout<H>>(),
};
(root, is_default, vec![(child_info, full_transaction)])
}
@@ -294,12 +289,11 @@ impl<H: Hasher> Backend<H> for InMemory<H> where H::Out: Codec {
fn next_child_storage_key(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<StorageKey>, Self::Error> {
let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded);
let next_key = self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned())))
let next_key = self.inner.get(&Some(child_info.to_owned()))
.and_then(|map| map.range::<[u8], _>(range).next().map(|(k, _)| k).cloned());
Ok(next_key)
@@ -321,11 +315,10 @@ impl<H: Hasher> Backend<H> for InMemory<H> where H::Out: Codec {
fn child_keys(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
prefix: &[u8],
) -> Vec<StorageKey> {
self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned())))
self.inner.get(&Some(child_info.to_owned()))
.into_iter()
.flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned())
.collect()
@@ -336,11 +329,10 @@ impl<H: Hasher> Backend<H> for InMemory<H> where H::Out: Codec {
let mut new_child_roots = Vec::new();
let mut root_map = None;
for (child_info, map) in &self.inner {
if let Some((storage_key, _child_info)) = child_info.as_ref() {
// no need to use child_info at this point because we use a MemoryDB for
// proof (with PrefixedMemoryDB it would be needed).
if let Some(child_info) = child_info.as_ref() {
let prefix_storage_key = child_info.prefixed_storage_key();
let ch = insert_into_memory_db::<H, _>(&mut mdb, map.clone().into_iter())?;
new_child_roots.push((storage_key.clone(), ch.as_ref().into()));
new_child_roots.push((prefix_storage_key.into_inner(), ch.as_ref().into()));
} else {
root_map = Some(map);
}
@@ -379,16 +371,18 @@ mod tests {
#[test]
fn in_memory_with_child_trie_only() {
let storage = InMemory::<BlakeTwo256>::default();
let child_info = OwnedChildInfo::new_default(b"unique_id_1".to_vec());
let child_info = ChildInfo::new_default(b"1");
let child_info = &child_info;
let mut storage = storage.update(
vec![(
Some((b"1".to_vec(), child_info.clone())),
Some(child_info.clone()),
vec![(b"2".to_vec(), Some(b"3".to_vec()))]
)]
);
let trie_backend = storage.as_trie_backend().unwrap();
assert_eq!(trie_backend.child_storage(b"1", child_info.as_ref(), b"2").unwrap(),
assert_eq!(trie_backend.child_storage(child_info, b"2").unwrap(),
Some(b"3".to_vec()));
assert!(trie_backend.storage(b"1").unwrap().is_some());
let storage_key = child_info.prefixed_storage_key();
assert!(trie_backend.storage(storage_key.as_slice()).unwrap().is_some());
}
}
+26 -32
View File
@@ -606,8 +606,7 @@ where
/// Generate child storage read proof.
pub fn prove_child_read<B, H, I>(
mut backend: B,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
keys: I,
) -> Result<StorageProof, Box<dyn Error>>
where
@@ -619,7 +618,7 @@ where
{
let trie_backend = backend.as_trie_backend()
.ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box<dyn Error>)?;
prove_child_read_on_trie_backend(trie_backend, storage_key, child_info, keys)
prove_child_read_on_trie_backend(trie_backend, child_info, keys)
}
/// Generate storage read proof on pre-created trie backend.
@@ -646,8 +645,7 @@ where
/// Generate storage read proof on pre-created trie backend.
pub fn prove_child_read_on_trie_backend<S, H, I>(
trie_backend: &TrieBackend<S, H>,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
keys: I,
) -> Result<StorageProof, Box<dyn Error>>
where
@@ -660,7 +658,7 @@ where
let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend);
for key in keys.into_iter() {
proving_backend
.child_storage(storage_key, child_info.clone(), key.as_ref())
.child_storage(child_info, key.as_ref())
.map_err(|e| Box::new(e) as Box<dyn Error>)?;
}
Ok(proving_backend.extract_proof())
@@ -691,7 +689,7 @@ where
pub fn read_child_proof_check<H, I>(
root: H::Out,
proof: StorageProof,
storage_key: &[u8],
child_info: &ChildInfo,
keys: I,
) -> Result<HashMap<Vec<u8>, Option<Vec<u8>>>, Box<dyn Error>>
where
@@ -705,7 +703,7 @@ where
for key in keys.into_iter() {
let value = read_child_proof_check_on_proving_backend(
&proving_backend,
storage_key,
child_info,
key.as_ref(),
)?;
result.insert(key.as_ref().to_vec(), value);
@@ -728,15 +726,14 @@ where
/// Check child storage read proof on pre-created proving backend.
pub fn read_child_proof_check_on_proving_backend<H>(
proving_backend: &TrieBackend<MemoryDB<H>, H>,
storage_key: &[u8],
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<Vec<u8>>, Box<dyn Error>>
where
H: Hasher,
H::Out: Ord + Codec,
{
// Not a prefixed memory db, using empty unique id and include root resolution.
proving_backend.child_storage(storage_key, ChildInfo::new_default(&[]), key)
proving_backend.child_storage(child_info, key)
.map_err(|e| Box::new(e) as Box<dyn Error>)
}
@@ -748,7 +745,7 @@ mod tests {
use super::*;
use super::ext::Ext;
use super::changes_trie::Configuration as ChangesTrieConfig;
use sp_core::{map, traits::{Externalities, RuntimeCode}, storage::ChildStorageKey};
use sp_core::{map, traits::{Externalities, RuntimeCode}};
use sp_runtime::traits::BlakeTwo256;
#[derive(Clone)]
@@ -759,8 +756,6 @@ mod tests {
fallback_succeeds: bool,
}
const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1");
impl CodeExecutor for DummyCodeExecutor {
type Error = u8;
@@ -1003,6 +998,8 @@ mod tests {
#[test]
fn set_child_storage_works() {
let child_info = ChildInfo::new_default(b"sub1");
let child_info = &child_info;
let mut state = InMemoryBackend::<BlakeTwo256>::default();
let backend = state.as_trie_backend().unwrap();
let mut overlay = OverlayedChanges::default();
@@ -1016,27 +1013,23 @@ mod tests {
);
ext.set_child_storage(
ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(),
CHILD_INFO_1,
child_info,
b"abc".to_vec(),
b"def".to_vec()
);
assert_eq!(
ext.child_storage(
ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(),
CHILD_INFO_1,
child_info,
b"abc"
),
Some(b"def".to_vec())
);
ext.kill_child_storage(
ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(),
CHILD_INFO_1,
child_info,
);
assert_eq!(
ext.child_storage(
ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(),
CHILD_INFO_1,
child_info,
b"abc"
),
None
@@ -1045,6 +1038,8 @@ mod tests {
#[test]
fn prove_read_and_proof_check_works() {
let child_info = ChildInfo::new_default(b"sub1");
let child_info = &child_info;
// fetch read proof from 'remote' full node
let remote_backend = trie_backend::tests::test_trie();
let remote_root = remote_backend.storage_root(::std::iter::empty()).0;
@@ -1071,20 +1066,19 @@ mod tests {
let remote_root = remote_backend.storage_root(::std::iter::empty()).0;
let remote_proof = prove_child_read(
remote_backend,
b":child_storage:default:sub1",
CHILD_INFO_1,
child_info,
&[b"value3"],
).unwrap();
let local_result1 = read_child_proof_check::<BlakeTwo256, _>(
remote_root,
remote_proof.clone(),
b":child_storage:default:sub1",
child_info,
&[b"value3"],
).unwrap();
let local_result2 = read_child_proof_check::<BlakeTwo256, _>(
remote_root,
remote_proof.clone(),
b":child_storage:default:sub1",
child_info,
&[b"value2"],
).unwrap();
assert_eq!(
@@ -1099,13 +1093,13 @@ mod tests {
#[test]
fn child_storage_uuid() {
const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1");
const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2");
let child_info_1 = ChildInfo::new_default(b"sub_test1");
let child_info_2 = ChildInfo::new_default(b"sub_test2");
use crate::trie_backend::tests::test_trie;
let mut overlay = OverlayedChanges::default();
let subtrie1 = ChildStorageKey::from_slice(b":child_storage:default:sub_test1").unwrap();
let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub_test2").unwrap();
let mut transaction = {
let backend = test_trie();
let mut cache = StorageTransactionCache::default();
@@ -1116,8 +1110,8 @@ mod tests {
changes_trie::disabled_state::<_, u64>(),
None,
);
ext.set_child_storage(subtrie1, CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec());
ext.set_child_storage(subtrie2, CHILD_INFO_2, b"abc".to_vec(), b"def".to_vec());
ext.set_child_storage(&child_info_1, b"abc".to_vec(), b"def".to_vec());
ext.set_child_storage(&child_info_2, b"abc".to_vec(), b"def".to_vec());
ext.storage_root();
cache.transaction.unwrap()
};
@@ -29,7 +29,7 @@ use crate::{
use std::iter::FromIterator;
use std::collections::{HashMap, BTreeMap, BTreeSet};
use codec::{Decode, Encode};
use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, OwnedChildInfo, ChildInfo};
use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo};
use std::{mem, ops};
use hash_db::Hasher;
@@ -79,8 +79,8 @@ pub struct OverlayedValue {
pub struct OverlayedChangeSet {
/// Top level storage changes.
pub top: BTreeMap<StorageKey, OverlayedValue>,
/// Child storage changes.
pub children: HashMap<StorageKey, (BTreeMap<StorageKey, OverlayedValue>, OwnedChildInfo)>,
/// Child storage changes. The map key is the child storage key without the common prefix.
pub children_default: HashMap<StorageKey, (BTreeMap<StorageKey, OverlayedValue>, ChildInfo)>,
}
/// A storage changes structure that can be generated by the data collected in [`OverlayedChanges`].
@@ -174,7 +174,7 @@ impl FromIterator<(StorageKey, OverlayedValue)> for OverlayedChangeSet {
fn from_iter<T: IntoIterator<Item = (StorageKey, OverlayedValue)>>(iter: T) -> Self {
Self {
top: iter.into_iter().collect(),
children: Default::default(),
children_default: Default::default(),
}
}
}
@@ -182,13 +182,13 @@ impl FromIterator<(StorageKey, OverlayedValue)> for OverlayedChangeSet {
impl OverlayedChangeSet {
/// Whether the change set is empty.
pub fn is_empty(&self) -> bool {
self.top.is_empty() && self.children.is_empty()
self.top.is_empty() && self.children_default.is_empty()
}
/// Clear the change set.
pub fn clear(&mut self) {
self.top.clear();
self.children.clear();
self.children_default.clear();
}
}
@@ -219,8 +219,8 @@ impl OverlayedChanges {
/// Returns a double-Option: None if the key is unknown (i.e. and the query should be referred
/// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose
/// value has been set.
pub fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option<Option<&[u8]>> {
if let Some(map) = self.prospective.children.get(storage_key) {
pub fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> Option<Option<&[u8]>> {
if let Some(map) = self.prospective.children_default.get(child_info.storage_key()) {
if let Some(val) = map.0.get(key) {
let size_read = val.value.as_ref().map(|x| x.len() as u64).unwrap_or(0);
self.stats.tally_read_modified(size_read);
@@ -228,7 +228,7 @@ impl OverlayedChanges {
}
}
if let Some(map) = self.committed.children.get(storage_key) {
if let Some(map) = self.committed.children_default.get(child_info.storage_key()) {
if let Some(val) = map.0.get(key) {
let size_read = val.value.as_ref().map(|x| x.len() as u64).unwrap_or(0);
self.stats.tally_read_modified(size_read);
@@ -260,15 +260,15 @@ impl OverlayedChanges {
/// `None` can be used to delete a value specified by the given key.
pub(crate) fn set_child_storage(
&mut self,
storage_key: StorageKey,
child_info: ChildInfo,
child_info: &ChildInfo,
key: StorageKey,
val: Option<StorageValue>,
) {
let size_write = val.as_ref().map(|x| x.len() as u64).unwrap_or(0);
self.stats.tally_write_overlay(size_write);
let extrinsic_index = self.extrinsic_index();
let map_entry = self.prospective.children.entry(storage_key)
let storage_key = child_info.storage_key().to_vec();
let map_entry = self.prospective.children_default.entry(storage_key)
.or_insert_with(|| (Default::default(), child_info.to_owned()));
let updatable = map_entry.1.try_update(child_info);
debug_assert!(updatable);
@@ -290,11 +290,11 @@ impl OverlayedChanges {
/// [`discard_prospective`]: #method.discard_prospective
pub(crate) fn clear_child_storage(
&mut self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
) {
let extrinsic_index = self.extrinsic_index();
let map_entry = self.prospective.children.entry(storage_key.to_vec())
let storage_key = child_info.storage_key();
let map_entry = self.prospective.children_default.entry(storage_key.to_vec())
.or_insert_with(|| (Default::default(), child_info.to_owned()));
let updatable = map_entry.1.try_update(child_info);
debug_assert!(updatable);
@@ -308,7 +308,7 @@ impl OverlayedChanges {
e.value = None;
});
if let Some((committed_map, _child_info)) = self.committed.children.get(storage_key) {
if let Some((committed_map, _child_info)) = self.committed.children_default.get(storage_key) {
for (key, value) in committed_map.iter() {
if !map_entry.0.contains_key(key) {
map_entry.0.insert(key.clone(), OverlayedValue {
@@ -364,12 +364,12 @@ impl OverlayedChanges {
pub(crate) fn clear_child_prefix(
&mut self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
prefix: &[u8],
) {
let extrinsic_index = self.extrinsic_index();
let map_entry = self.prospective.children.entry(storage_key.to_vec())
let storage_key = child_info.storage_key();
let map_entry = self.prospective.children_default.entry(storage_key.to_vec())
.or_insert_with(|| (Default::default(), child_info.to_owned()));
let updatable = map_entry.1.try_update(child_info);
debug_assert!(updatable);
@@ -385,7 +385,7 @@ impl OverlayedChanges {
}
}
if let Some((child_committed, _child_info)) = self.committed.children.get(storage_key) {
if let Some((child_committed, _child_info)) = self.committed.children_default.get(storage_key) {
// Then do the same with keys from committed changes.
// NOTE that we are making changes in the prospective change set.
for key in child_committed.keys() {
@@ -422,8 +422,8 @@ impl OverlayedChanges {
.extend(prospective_extrinsics);
}
}
for (storage_key, (map, child_info)) in self.prospective.children.drain() {
let child_content = self.committed.children.entry(storage_key)
for (storage_key, (map, child_info)) in self.prospective.children_default.drain() {
let child_content = self.committed.children_default.entry(storage_key)
.or_insert_with(|| (Default::default(), child_info));
// No update to child info at this point (will be needed for deletion).
for (key, val) in map.into_iter() {
@@ -445,14 +445,14 @@ impl OverlayedChanges {
/// Will panic if there are any uncommitted prospective changes.
fn drain_committed(&mut self) -> (
impl Iterator<Item=(StorageKey, Option<StorageValue>)>,
impl Iterator<Item=(StorageKey, (impl Iterator<Item=(StorageKey, Option<StorageValue>)>, OwnedChildInfo))>,
impl Iterator<Item=(StorageKey, (impl Iterator<Item=(StorageKey, Option<StorageValue>)>, ChildInfo))>,
) {
assert!(self.prospective.is_empty());
(
std::mem::replace(&mut self.committed.top, Default::default())
.into_iter()
.map(|(k, v)| (k, v.value)),
std::mem::replace(&mut self.committed.children, Default::default())
std::mem::replace(&mut self.committed.children_default, Default::default())
.into_iter()
.map(|(sk, (v, ci))| (sk, (v.into_iter().map(|(k, v)| (k, v.value)), ci))),
)
@@ -549,21 +549,20 @@ impl OverlayedChanges {
) -> H::Out
where H::Out: Ord + Encode,
{
let child_storage_keys = self.prospective.children.keys()
.chain(self.committed.children.keys());
let child_storage_keys = self.prospective.children_default.keys()
.chain(self.committed.children_default.keys());
let child_delta_iter = child_storage_keys.map(|storage_key|
(
storage_key.clone(),
self.committed.children.get(storage_key)
self.default_child_info(storage_key).cloned()
.expect("child info initialized in either committed or prospective"),
self.committed.children_default.get(storage_key)
.into_iter()
.flat_map(|(map, _)| map.iter().map(|(k, v)| (k.clone(), v.value.clone())))
.chain(
self.prospective.children.get(storage_key)
self.prospective.children_default.get(storage_key)
.into_iter()
.flat_map(|(map, _)| map.iter().map(|(k, v)| (k.clone(), v.value.clone())))
),
self.child_info(storage_key).cloned()
.expect("child info initialized in either committed or prospective"),
)
);
@@ -610,11 +609,11 @@ impl OverlayedChanges {
/// Get child info for a storage key.
/// Take the latest value so prospective first.
pub fn child_info(&self, storage_key: &[u8]) -> Option<&OwnedChildInfo> {
if let Some((_, ci)) = self.prospective.children.get(storage_key) {
pub fn default_child_info(&self, storage_key: &[u8]) -> Option<&ChildInfo> {
if let Some((_, ci)) = self.prospective.children_default.get(storage_key) {
return Some(&ci);
}
if let Some((_, ci)) = self.committed.children.get(storage_key) {
if let Some((_, ci)) = self.committed.children_default.get(storage_key) {
return Some(&ci);
}
None
@@ -654,10 +653,10 @@ impl OverlayedChanges {
) -> Option<(&[u8], &OverlayedValue)> {
let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded);
let next_prospective_key = self.prospective.children.get(storage_key)
let next_prospective_key = self.prospective.children_default.get(storage_key)
.and_then(|(map, _)| map.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v)));
let next_committed_key = self.committed.children.get(storage_key)
let next_committed_key = self.committed.children_default.get(storage_key)
.and_then(|(map, _)| map.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v)));
match (next_committed_key, next_prospective_key) {
@@ -866,39 +865,40 @@ mod tests {
#[test]
fn next_child_storage_key_change_works() {
let child = b"Child1".to_vec();
let child_info = ChildInfo::new_default(b"uniqueid");
let child_info = ChildInfo::new_default(b"Child1");
let child_info = &child_info;
let child = child_info.storage_key();
let mut overlay = OverlayedChanges::default();
overlay.set_child_storage(child.clone(), child_info, vec![20], Some(vec![20]));
overlay.set_child_storage(child.clone(), child_info, vec![30], Some(vec![30]));
overlay.set_child_storage(child.clone(), child_info, vec![40], Some(vec![40]));
overlay.set_child_storage(child_info, vec![20], Some(vec![20]));
overlay.set_child_storage(child_info, vec![30], Some(vec![30]));
overlay.set_child_storage(child_info, vec![40], Some(vec![40]));
overlay.commit_prospective();
overlay.set_child_storage(child.clone(), child_info, vec![10], Some(vec![10]));
overlay.set_child_storage(child.clone(), child_info, vec![30], None);
overlay.set_child_storage(child_info, vec![10], Some(vec![10]));
overlay.set_child_storage(child_info, vec![30], None);
// next_prospective < next_committed
let next_to_5 = overlay.next_child_storage_key_change(&child, &[5]).unwrap();
let next_to_5 = overlay.next_child_storage_key_change(child, &[5]).unwrap();
assert_eq!(next_to_5.0.to_vec(), vec![10]);
assert_eq!(next_to_5.1.value, Some(vec![10]));
// next_committed < next_prospective
let next_to_10 = overlay.next_child_storage_key_change(&child, &[10]).unwrap();
let next_to_10 = overlay.next_child_storage_key_change(child, &[10]).unwrap();
assert_eq!(next_to_10.0.to_vec(), vec![20]);
assert_eq!(next_to_10.1.value, Some(vec![20]));
// next_committed == next_prospective
let next_to_20 = overlay.next_child_storage_key_change(&child, &[20]).unwrap();
let next_to_20 = overlay.next_child_storage_key_change(child, &[20]).unwrap();
assert_eq!(next_to_20.0.to_vec(), vec![30]);
assert_eq!(next_to_20.1.value, None);
// next_committed, no next_prospective
let next_to_30 = overlay.next_child_storage_key_change(&child, &[30]).unwrap();
let next_to_30 = overlay.next_child_storage_key_change(child, &[30]).unwrap();
assert_eq!(next_to_30.0.to_vec(), vec![40]);
assert_eq!(next_to_30.1.value, Some(vec![40]));
overlay.set_child_storage(child.clone(), child_info, vec![50], Some(vec![50]));
overlay.set_child_storage(child_info, vec![50], Some(vec![50]));
// next_prospective, no next_committed
let next_to_40 = overlay.next_child_storage_key_change(&child, &[40]).unwrap();
let next_to_40 = overlay.next_child_storage_key_change(child, &[40]).unwrap();
assert_eq!(next_to_40.0.to_vec(), vec![50]);
assert_eq!(next_to_40.1.value, Some(vec![50]));
}
@@ -22,7 +22,7 @@ use codec::{Decode, Codec};
use log::debug;
use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix};
use sp_trie::{
MemoryDB, default_child_trie_root, read_trie_value_with, read_child_trie_value_with,
MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with,
record_all_keys, StorageProof,
};
pub use sp_trie::Recorder;
@@ -67,13 +67,13 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H>
/// Produce proof for a child key query.
pub fn child_storage(
&mut self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8]
) -> Result<Option<Vec<u8>>, String> {
let storage_key = child_info.storage_key();
let root = self.storage(storage_key)?
.and_then(|r| Decode::decode(&mut &r[..]).ok())
.unwrap_or(default_child_trie_root::<Layout<H>>(storage_key));
.unwrap_or(empty_child_trie_root::<Layout<H>>());
let mut read_overlay = S::Overlay::default();
let eph = Ephemeral::new(
@@ -84,7 +84,6 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H>
let map_e = |e| format!("Trie lookup error: {}", e);
read_child_trie_value_with::<Layout<H>, _, _>(
storage_key,
child_info.keyspace(),
&eph,
&root.as_ref(),
@@ -201,20 +200,18 @@ impl<'a, S, H> Backend<H> for ProvingBackend<'a, S, H>
fn child_storage(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<Vec<u8>>, Self::Error> {
self.0.child_storage(storage_key, child_info, key)
self.0.child_storage(child_info, key)
}
fn for_keys_in_child_storage<F: FnMut(&[u8])>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
f: F,
) {
self.0.for_keys_in_child_storage(storage_key, child_info, f)
self.0.for_keys_in_child_storage(child_info, f)
}
fn next_storage_key(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
@@ -223,11 +220,10 @@ impl<'a, S, H> Backend<H> for ProvingBackend<'a, S, H>
fn next_child_storage_key(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<Vec<u8>>, Self::Error> {
self.0.next_child_storage_key(storage_key, child_info, key)
self.0.next_child_storage_key(child_info, key)
}
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
@@ -240,12 +236,11 @@ impl<'a, S, H> Backend<H> for ProvingBackend<'a, S, H>
fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
prefix: &[u8],
f: F,
) {
self.0.for_child_keys_with_prefix(storage_key, child_info, prefix, f)
self.0.for_child_keys_with_prefix( child_info, prefix, f)
}
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
@@ -258,11 +253,10 @@ impl<'a, S, H> Backend<H> for ProvingBackend<'a, S, H>
fn child_keys(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
prefix: &[u8],
) -> Vec<Vec<u8>> {
self.0.child_keys(storage_key, child_info, prefix)
self.0.child_keys(child_info, prefix)
}
fn storage_root<I>(&self, delta: I) -> (H::Out, Self::Transaction)
@@ -273,15 +267,14 @@ impl<'a, S, H> Backend<H> for ProvingBackend<'a, S, H>
fn child_storage_root<I>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
delta: I,
) -> (H::Out, bool, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord
{
self.0.child_storage_root(storage_key, child_info, delta)
self.0.child_storage_root(child_info, delta)
}
fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats) { }
@@ -314,14 +307,10 @@ mod tests {
use crate::InMemoryBackend;
use crate::trie_backend::tests::test_trie;
use super::*;
use sp_core::storage::ChildStorageKey;
use crate::proving_backend::create_proof_check_backend;
use sp_trie::PrefixedMemoryDB;
use sp_runtime::traits::BlakeTwo256;
const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1");
const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2");
fn test_proving<'a>(
trie_backend: &'a TrieBackend<PrefixedMemoryDB<BlakeTwo256>,BlakeTwo256>,
) -> ProvingBackend<'a, PrefixedMemoryDB<BlakeTwo256>, BlakeTwo256> {
@@ -389,33 +378,33 @@ mod tests {
#[test]
fn proof_recorded_and_checked_with_child() {
let subtrie1 = ChildStorageKey::from_slice(b":child_storage:default:sub1").unwrap();
let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub2").unwrap();
let own1 = subtrie1.into_owned();
let own2 = subtrie2.into_owned();
let child_info_1 = ChildInfo::new_default(b"sub1");
let child_info_2 = ChildInfo::new_default(b"sub2");
let child_info_1 = &child_info_1;
let child_info_2 = &child_info_2;
let contents = vec![
(None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect()),
(Some((own1.clone(), CHILD_INFO_1.to_owned())),
(Some(child_info_1.clone()),
(28..65).map(|i| (vec![i], Some(vec![i]))).collect()),
(Some((own2.clone(), CHILD_INFO_2.to_owned())),
(Some(child_info_2.clone()),
(10..15).map(|i| (vec![i], Some(vec![i]))).collect()),
];
let in_memory = InMemoryBackend::<BlakeTwo256>::default();
let mut in_memory = in_memory.update(contents);
let in_memory_root = in_memory.full_storage_root::<_, Vec<_>, _>(
::std::iter::empty(),
in_memory.child_storage_keys().map(|k|(k.0.to_vec(), Vec::new(), k.1.to_owned()))
in_memory.child_storage_infos().map(|k|(k.to_owned(), Vec::new()))
).0;
(0..64).for_each(|i| assert_eq!(
in_memory.storage(&[i]).unwrap().unwrap(),
vec![i]
));
(28..65).for_each(|i| assert_eq!(
in_memory.child_storage(&own1[..], CHILD_INFO_1, &[i]).unwrap().unwrap(),
in_memory.child_storage(child_info_1, &[i]).unwrap().unwrap(),
vec![i]
));
(10..15).for_each(|i| assert_eq!(
in_memory.child_storage(&own2[..], CHILD_INFO_2, &[i]).unwrap().unwrap(),
in_memory.child_storage(child_info_2, &[i]).unwrap().unwrap(),
vec![i]
));
@@ -443,7 +432,7 @@ mod tests {
assert_eq!(proof_check.storage(&[64]).unwrap(), None);
let proving = ProvingBackend::new(trie);
assert_eq!(proving.child_storage(&own1[..], CHILD_INFO_1, &[64]), Ok(Some(vec![64])));
assert_eq!(proving.child_storage(child_info_1, &[64]), Ok(Some(vec![64])));
let proof = proving.extract_proof();
let proof_check = create_proof_check_backend::<BlakeTwo256>(
@@ -451,7 +440,7 @@ mod tests {
proof
).unwrap();
assert_eq!(
proof_check.child_storage(&own1[..], CHILD_INFO_1, &[64]).unwrap().unwrap(),
proof_check.child_storage(child_info_1, &[64]).unwrap().unwrap(),
vec![64]
);
}
@@ -93,7 +93,7 @@ impl<H: Hasher, N: ChangesTrieBlockNumber> TestExternalities<H, N>
overlay.set_collect_extrinsics(changes_trie_config.is_some());
assert!(storage.top.keys().all(|key| !is_child_storage_key(key)));
assert!(storage.children.keys().all(|key| is_child_storage_key(key)));
assert!(storage.children_default.keys().all(|key| is_child_storage_key(key)));
storage.top.insert(HEAP_PAGES.to_vec(), 8u64.encode());
storage.top.insert(CODE.to_vec(), code.to_vec());
@@ -133,11 +133,11 @@ impl<H: Hasher, N: ChangesTrieBlockNumber> TestExternalities<H, N>
.map(|(k, v)| (k, v.value)).collect();
let mut transaction = vec![(None, top)];
self.overlay.committed.children.clone().into_iter()
.chain(self.overlay.prospective.children.clone().into_iter())
.for_each(|(keyspace, (map, child_info))| {
self.overlay.committed.children_default.clone().into_iter()
.chain(self.overlay.prospective.children_default.clone().into_iter())
.for_each(|(_storage_key, (map, child_info))| {
transaction.push((
Some((keyspace, child_info)),
Some(child_info),
map.into_iter()
.map(|(k, v)| (k, v.value))
.collect::<Vec<_>>(),
@@ -18,9 +18,9 @@
use log::{warn, debug};
use hash_db::Hasher;
use sp_trie::{Trie, delta_trie_root, default_child_trie_root, child_delta_trie_root};
use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root};
use sp_trie::trie_types::{TrieDB, TrieError, Layout};
use sp_core::storage::ChildInfo;
use sp_core::storage::{ChildInfo, ChildType};
use codec::{Codec, Decode};
use crate::{
StorageKey, StorageValue, Backend,
@@ -80,11 +80,10 @@ impl<S: TrieBackendStorage<H>, H: Hasher> Backend<H> for TrieBackend<S, H> where
fn child_storage(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<StorageValue>, Self::Error> {
self.essence.child_storage(storage_key, child_info, key)
self.essence.child_storage(child_info, key)
}
fn next_storage_key(&self, key: &[u8]) -> Result<Option<StorageKey>, Self::Error> {
@@ -93,11 +92,10 @@ impl<S: TrieBackendStorage<H>, H: Hasher> Backend<H> for TrieBackend<S, H> where
fn next_child_storage_key(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<StorageKey>, Self::Error> {
self.essence.next_child_storage_key(storage_key, child_info, key)
self.essence.next_child_storage_key(child_info, key)
}
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
@@ -110,21 +108,19 @@ impl<S: TrieBackendStorage<H>, H: Hasher> Backend<H> for TrieBackend<S, H> where
fn for_keys_in_child_storage<F: FnMut(&[u8])>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
f: F,
) {
self.essence.for_keys_in_child_storage(storage_key, child_info, f)
self.essence.for_keys_in_child_storage(child_info, f)
}
fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
prefix: &[u8],
f: F,
) {
self.essence.for_child_keys_with_prefix(storage_key, child_info, prefix, f)
self.essence.for_child_keys_with_prefix(child_info, prefix, f)
}
fn pairs(&self) -> Vec<(StorageKey, StorageValue)> {
@@ -194,18 +190,20 @@ impl<S: TrieBackendStorage<H>, H: Hasher> Backend<H> for TrieBackend<S, H> where
fn child_storage_root<I>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
delta: I,
) -> (H::Out, bool, Self::Transaction)
where
I: IntoIterator<Item=(StorageKey, Option<StorageValue>)>,
H::Out: Ord,
{
let default_root = default_child_trie_root::<Layout<H>>(storage_key);
let default_root = match child_info.child_type() {
ChildType::ParentKeyId => empty_child_trie_root::<Layout<H>>()
};
let mut write_overlay = S::Overlay::default();
let mut root = match self.storage(storage_key) {
let prefixed_storage_key = child_info.prefixed_storage_key();
let mut root = match self.storage(prefixed_storage_key.as_slice()) {
Ok(value) =>
value.and_then(|r| Decode::decode(&mut &r[..]).ok()).unwrap_or(default_root.clone()),
Err(e) => {
@@ -221,7 +219,6 @@ impl<S: TrieBackendStorage<H>, H: Hasher> Backend<H> for TrieBackend<S, H> where
);
match child_delta_trie_root::<Layout<H>, _, _, _, _, _>(
storage_key,
child_info.keyspace(),
&mut eph,
root,
@@ -257,16 +254,14 @@ pub mod tests {
use sp_runtime::traits::BlakeTwo256;
use super::*;
const CHILD_KEY_1: &[u8] = b":child_storage:default:sub1";
const CHILD_UUID_1: &[u8] = b"unique_id_1";
const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1);
const CHILD_KEY_1: &[u8] = b"sub1";
fn test_db() -> (PrefixedMemoryDB<BlakeTwo256>, H256) {
let child_info = ChildInfo::new_default(CHILD_KEY_1);
let mut root = H256::default();
let mut mdb = PrefixedMemoryDB::<BlakeTwo256>::default();
{
let mut mdb = KeySpacedDBMut::new(&mut mdb, CHILD_UUID_1);
let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace());
let mut trie = TrieDBMut::new(&mut mdb, &mut root);
trie.insert(b"value3", &[142]).expect("insert failed");
trie.insert(b"value4", &[124]).expect("insert failed");
@@ -276,7 +271,8 @@ pub mod tests {
let mut sub_root = Vec::new();
root.encode_to(&mut sub_root);
let mut trie = TrieDBMut::new(&mut mdb, &mut root);
trie.insert(CHILD_KEY_1, &sub_root[..]).expect("insert failed");
trie.insert(child_info.prefixed_storage_key().as_slice(), &sub_root[..])
.expect("insert failed");
trie.insert(b"key", b"value").expect("insert failed");
trie.insert(b"value1", &[42]).expect("insert failed");
trie.insert(b"value2", &[24]).expect("insert failed");
@@ -302,7 +298,7 @@ pub mod tests {
fn read_from_child_storage_returns_some() {
let test_trie = test_trie();
assert_eq!(
test_trie.child_storage(CHILD_KEY_1, CHILD_INFO_1, b"value3").unwrap(),
test_trie.child_storage(&ChildInfo::new_default(CHILD_KEY_1), b"value3").unwrap(),
Some(vec![142u8]),
);
}
@@ -22,7 +22,7 @@ use std::sync::Arc;
use log::{debug, warn};
use hash_db::{self, Hasher, EMPTY_PREFIX, Prefix};
use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue,
default_child_trie_root, read_trie_value, read_child_trie_value,
empty_child_trie_root, read_trie_value, read_child_trie_value,
for_keys_in_child_trie, KeySpacedDB, TrieDBIterator};
use sp_trie::trie_types::{TrieDB, TrieError, Layout};
use crate::{backend::Consolidate, StorageKey, StorageValue};
@@ -71,15 +71,19 @@ impl<S: TrieBackendStorage<H>, H: Hasher> TrieBackendEssence<S, H> where H::Out:
self.next_storage_key_from_root(&self.root, None, key)
}
/// Access the root of the child storage in its parent trie
fn child_root(&self, child_info: &ChildInfo) -> Result<Option<StorageValue>, String> {
self.storage(child_info.prefixed_storage_key().as_slice())
}
/// Return the next key in the child trie i.e. the minimum key that is strictly superior to
/// `key` in lexicographic order.
pub fn next_child_storage_key(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<StorageKey>, String> {
let child_root = match self.storage(storage_key)? {
let child_root = match self.child_root(child_info)? {
Some(child_root) => child_root,
None => return Ok(None),
};
@@ -87,7 +91,7 @@ impl<S: TrieBackendStorage<H>, H: Hasher> TrieBackendEssence<S, H> where H::Out:
let mut hash = H::Out::default();
if child_root.len() != hash.as_ref().len() {
return Err(format!("Invalid child storage hash at {:?}", storage_key));
return Err(format!("Invalid child storage hash at {:?}", child_info.storage_key()));
}
// note: child_root and hash must be same size, panics otherwise.
hash.as_mut().copy_from_slice(&child_root[..]);
@@ -99,7 +103,7 @@ impl<S: TrieBackendStorage<H>, H: Hasher> TrieBackendEssence<S, H> where H::Out:
fn next_storage_key_from_root(
&self,
root: &H::Out,
child_info: Option<ChildInfo>,
child_info: Option<&ChildInfo>,
key: &[u8],
) -> Result<Option<StorageKey>, String> {
let mut read_overlay = S::Overlay::default();
@@ -161,12 +165,11 @@ impl<S: TrieBackendStorage<H>, H: Hasher> TrieBackendEssence<S, H> where H::Out:
/// Get the value of child storage at given key.
pub fn child_storage(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
key: &[u8],
) -> Result<Option<StorageValue>, String> {
let root = self.storage(storage_key)?
.unwrap_or(default_child_trie_root::<Layout<H>>(storage_key).encode());
let root = self.child_root(child_info)?
.unwrap_or(empty_child_trie_root::<Layout<H>>().encode());
let mut read_overlay = S::Overlay::default();
let eph = Ephemeral {
@@ -176,19 +179,18 @@ impl<S: TrieBackendStorage<H>, H: Hasher> TrieBackendEssence<S, H> where H::Out:
let map_e = |e| format!("Trie lookup error: {}", e);
read_child_trie_value::<Layout<H>, _>(storage_key, child_info.keyspace(), &eph, &root, key)
read_child_trie_value::<Layout<H>, _>(child_info.keyspace(), &eph, &root, key)
.map_err(map_e)
}
/// Retrieve all entries keys of child storage and call `f` for each of those keys.
pub fn for_keys_in_child_storage<F: FnMut(&[u8])>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
f: F,
) {
let root = match self.storage(storage_key) {
Ok(v) => v.unwrap_or(default_child_trie_root::<Layout<H>>(storage_key).encode()),
let root = match self.child_root(child_info) {
Ok(v) => v.unwrap_or(empty_child_trie_root::<Layout<H>>().encode()),
Err(e) => {
debug!(target: "trie", "Error while iterating child storage: {}", e);
return;
@@ -202,7 +204,6 @@ impl<S: TrieBackendStorage<H>, H: Hasher> TrieBackendEssence<S, H> where H::Out:
};
if let Err(e) = for_keys_in_child_trie::<Layout<H>, _, Ephemeral<S, H>>(
storage_key,
child_info.keyspace(),
&eph,
&root,
@@ -215,13 +216,12 @@ impl<S: TrieBackendStorage<H>, H: Hasher> TrieBackendEssence<S, H> where H::Out:
/// Execute given closure for all keys starting with prefix.
pub fn for_child_keys_with_prefix<F: FnMut(&[u8])>(
&self,
storage_key: &[u8],
child_info: ChildInfo,
child_info: &ChildInfo,
prefix: &[u8],
mut f: F,
) {
let root_vec = match self.storage(storage_key) {
Ok(v) => v.unwrap_or(default_child_trie_root::<Layout<H>>(storage_key).encode()),
let root_vec = match self.child_root(child_info) {
Ok(v) => v.unwrap_or(empty_child_trie_root::<Layout<H>>().encode()),
Err(e) => {
debug!(target: "trie", "Error while iterating child storage: {}", e);
return;
@@ -242,7 +242,7 @@ impl<S: TrieBackendStorage<H>, H: Hasher> TrieBackendEssence<S, H> where H::Out:
root: &H::Out,
prefix: &[u8],
mut f: F,
child_info: Option<ChildInfo>,
child_info: Option<&ChildInfo>,
) {
let mut read_overlay = S::Overlay::default();
let eph = Ephemeral {
@@ -436,7 +436,8 @@ mod test {
#[test]
fn next_storage_key_and_next_child_storage_key_work() {
let child_info = ChildInfo::new_default(b"uniqueid");
let child_info = ChildInfo::new_default(b"MyChild");
let child_info = &child_info;
// Contains values
let mut root_1 = H256::default();
// Contains child trie
@@ -460,7 +461,8 @@ mod test {
}
{
let mut trie = TrieDBMut::new(&mut mdb, &mut root_2);
trie.insert(b"MyChild", root_1.as_ref()).expect("insert failed");
trie.insert(child_info.prefixed_storage_key().as_slice(), root_1.as_ref())
.expect("insert failed");
};
let essence_1 = TrieBackendEssence::new(mdb, root_1);
@@ -475,19 +477,19 @@ mod test {
let essence_2 = TrieBackendEssence::new(mdb, root_2);
assert_eq!(
essence_2.next_child_storage_key(b"MyChild", child_info, b"2"), Ok(Some(b"3".to_vec()))
essence_2.next_child_storage_key(child_info, b"2"), Ok(Some(b"3".to_vec()))
);
assert_eq!(
essence_2.next_child_storage_key(b"MyChild", child_info, b"3"), Ok(Some(b"4".to_vec()))
essence_2.next_child_storage_key(child_info, b"3"), Ok(Some(b"4".to_vec()))
);
assert_eq!(
essence_2.next_child_storage_key(b"MyChild", child_info, b"4"), Ok(Some(b"6".to_vec()))
essence_2.next_child_storage_key(child_info, b"4"), Ok(Some(b"6".to_vec()))
);
assert_eq!(
essence_2.next_child_storage_key(b"MyChild", child_info, b"5"), Ok(Some(b"6".to_vec()))
essence_2.next_child_storage_key(child_info, b"5"), Ok(Some(b"6".to_vec()))
);
assert_eq!(
essence_2.next_child_storage_key(b"MyChild", child_info, b"6"), Ok(None)
essence_2.next_child_storage_key(child_info, b"6"), Ok(None)
);
}
}
+1
View File
@@ -16,6 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"]
sp-std = { version = "2.0.0-dev", default-features = false, path = "../std" }
serde = { version = "1.0.101", optional = true, features = ["derive"] }
impl-serde = { version = "0.2.3", optional = true }
ref-cast = "1.0.0"
sp-debug-derive = { version = "2.0.0-dev", path = "../debug-derive" }
[features]
+184 -147
View File
@@ -22,7 +22,9 @@
use serde::{Serialize, Deserialize};
use sp_debug_derive::RuntimeDebug;
use sp_std::{vec::Vec, borrow::Cow};
use sp_std::vec::Vec;
use sp_std::ops::{Deref, DerefMut};
use ref_cast::RefCast;
/// Storage key.
#[derive(PartialEq, Eq, RuntimeDebug)]
@@ -32,6 +34,51 @@ pub struct StorageKey(
pub Vec<u8>,
);
/// Storage key of a child trie, it contains the prefix to the key.
#[derive(PartialEq, Eq, RuntimeDebug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone))]
#[repr(transparent)]
#[derive(RefCast)]
pub struct PrefixedStorageKey(
#[cfg_attr(feature = "std", serde(with="impl_serde::serialize"))]
Vec<u8>,
);
impl Deref for PrefixedStorageKey {
type Target = Vec<u8>;
fn deref(&self) -> &Vec<u8> {
&self.0
}
}
impl DerefMut for PrefixedStorageKey {
fn deref_mut(&mut self) -> &mut Vec<u8> {
&mut self.0
}
}
impl PrefixedStorageKey {
/// Create a prefixed storage key from its byte array
/// representation.
pub fn new(inner: Vec<u8>) -> Self {
PrefixedStorageKey(inner)
}
/// Create a prefixed storage key reference.
pub fn new_ref(inner: &Vec<u8>) -> &Self {
PrefixedStorageKey::ref_cast(inner)
}
/// Get inner key, this should
/// only be needed when writing
/// into parent trie to avoid an
/// allocation.
pub fn into_inner(self) -> Vec<u8> {
self.0
}
}
/// Storage data associated to a [`StorageKey`].
#[derive(PartialEq, Eq, RuntimeDebug)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone))]
@@ -53,7 +100,7 @@ pub struct StorageChild {
pub data: StorageMap,
/// Associated child info for a child
/// trie.
pub child_info: OwnedChildInfo,
pub child_info: ChildInfo,
}
#[cfg(feature = "std")]
@@ -62,8 +109,11 @@ pub struct StorageChild {
pub struct Storage {
/// Top trie storage data.
pub top: StorageMap,
/// Children trie storage data by storage key.
pub children: std::collections::HashMap<Vec<u8>, StorageChild>,
/// Children trie storage data.
/// The key does not including prefix, for the `default`
/// trie kind, so this is exclusively for the `ChildType::ParentKeyId`
/// tries.
pub children_default: std::collections::HashMap<Vec<u8>, StorageChild>,
}
/// Storage change set
@@ -106,200 +156,187 @@ pub mod well_known_keys {
// Other code might depend on this, so be careful changing this.
key.starts_with(CHILD_STORAGE_KEY_PREFIX)
}
/// Determine whether a child trie key is valid.
///
/// For now, the only valid child trie keys are those starting with `:child_storage:default:`.
///
/// `child_trie_root` and `child_delta_trie_root` can panic if invalid value is provided to them.
pub fn is_child_trie_key_valid(storage_key: &[u8]) -> bool {
let has_right_prefix = storage_key.starts_with(b":child_storage:default:");
if has_right_prefix {
// This is an attempt to catch a change of `is_child_storage_key`, which
// just checks if the key has prefix `:child_storage:` at the moment of writing.
debug_assert!(
is_child_storage_key(&storage_key),
"`is_child_trie_key_valid` is a subset of `is_child_storage_key`",
);
}
has_right_prefix
}
}
/// A wrapper around a child storage key.
///
/// This wrapper ensures that the child storage key is correct and properly used. It is
/// impossible to create an instance of this struct without providing a correct `storage_key`.
pub struct ChildStorageKey<'a> {
storage_key: Cow<'a, [u8]>,
}
impl<'a> ChildStorageKey<'a> {
/// Create new instance of `Self`.
fn new(storage_key: Cow<'a, [u8]>) -> Option<Self> {
if well_known_keys::is_child_trie_key_valid(&storage_key) {
Some(ChildStorageKey { storage_key })
} else {
None
}
}
/// Create a new `ChildStorageKey` from a vector.
///
/// `storage_key` need to start with `:child_storage:default:`
/// See `is_child_trie_key_valid` for more details.
pub fn from_vec(key: Vec<u8>) -> Option<Self> {
Self::new(Cow::Owned(key))
}
/// Create a new `ChildStorageKey` from a slice.
///
/// `storage_key` need to start with `:child_storage:default:`
/// See `is_child_trie_key_valid` for more details.
pub fn from_slice(key: &'a [u8]) -> Option<Self> {
Self::new(Cow::Borrowed(key))
}
/// Get access to the byte representation of the storage key.
///
/// This key is guaranteed to be correct.
pub fn as_ref(&self) -> &[u8] {
&*self.storage_key
}
/// Destruct this instance into an owned vector that represents the storage key.
///
/// This key is guaranteed to be correct.
pub fn into_owned(self) -> Vec<u8> {
self.storage_key.into_owned()
}
}
#[derive(Clone, Copy)]
/// Information related to a child state.
pub enum ChildInfo<'a> {
Default(ChildTrie<'a>),
}
/// Owned version of `ChildInfo`.
/// To be use in persistence layers.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))]
pub enum OwnedChildInfo {
Default(OwnedChildTrie),
pub enum ChildInfo {
/// This is the one used by default.
ParentKeyId(ChildTrieParentKeyId),
}
impl<'a> ChildInfo<'a> {
/// Instantiates information for a default child trie.
pub const fn new_default(unique_id: &'a[u8]) -> Self {
ChildInfo::Default(ChildTrie {
data: unique_id,
impl ChildInfo {
/// Instantiates child information for a default child trie
/// of kind `ChildType::ParentKeyId`, using an unprefixed parent
/// storage key.
pub fn new_default(storage_key: &[u8]) -> Self {
let data = storage_key.to_vec();
ChildInfo::ParentKeyId(ChildTrieParentKeyId { data })
}
/// Same as `new_default` but with `Vec<u8>` as input.
pub fn new_default_from_vec(storage_key: Vec<u8>) -> Self {
ChildInfo::ParentKeyId(ChildTrieParentKeyId {
data: storage_key,
})
}
/// Instantiates a owned version of this child info.
pub fn to_owned(&self) -> OwnedChildInfo {
/// Try to update with another instance, return false if both instance
/// are not compatible.
pub fn try_update(&mut self, other: &ChildInfo) -> bool {
match self {
ChildInfo::Default(ChildTrie { data })
=> OwnedChildInfo::Default(OwnedChildTrie {
data: data.to_vec(),
}),
ChildInfo::ParentKeyId(child_trie) => child_trie.try_update(other),
}
}
/// Create child info from a linear byte packed value and a given type.
pub fn resolve_child_info(child_type: u32, data: &'a[u8]) -> Option<Self> {
match child_type {
x if x == ChildType::CryptoUniqueId as u32 => Some(ChildInfo::new_default(data)),
_ => None,
}
}
/// Return a single byte vector containing packed child info content and its child info type.
/// This can be use as input for `resolve_child_info`.
pub fn info(&self) -> (&[u8], u32) {
match self {
ChildInfo::Default(ChildTrie {
data,
}) => (data, ChildType::CryptoUniqueId as u32),
}
}
/// Return byte sequence (keyspace) that can be use by underlying db to isolate keys.
/// Returns byte sequence (keyspace) that can be use by underlying db to isolate keys.
/// This is a unique id of the child trie. The collision resistance of this value
/// depends on the type of child info use. For `ChildInfo::Default` it is and need to be.
pub fn keyspace(&self) -> &[u8] {
match self {
ChildInfo::Default(ChildTrie {
ChildInfo::ParentKeyId(..) => self.storage_key(),
}
}
/// Returns a reference to the location in the direct parent of
/// this trie but without the common prefix for this kind of
/// child trie.
pub fn storage_key(&self) -> &[u8] {
match self {
ChildInfo::ParentKeyId(ChildTrieParentKeyId {
data,
}) => &data[..],
}
}
/// Return a the full location in the direct parent of
/// this trie.
pub fn prefixed_storage_key(&self) -> PrefixedStorageKey {
match self {
ChildInfo::ParentKeyId(ChildTrieParentKeyId {
data,
}) => ChildType::ParentKeyId.new_prefixed_key(data.as_slice()),
}
}
/// Returns a the full location in the direct parent of
/// this trie.
pub fn into_prefixed_storage_key(self) -> PrefixedStorageKey {
match self {
ChildInfo::ParentKeyId(ChildTrieParentKeyId {
mut data,
}) => {
ChildType::ParentKeyId.do_prefix_key(&mut data);
PrefixedStorageKey(data)
},
}
}
/// Returns the type for this child info.
pub fn child_type(&self) -> ChildType {
match self {
ChildInfo::ParentKeyId(..) => ChildType::ParentKeyId,
}
}
}
/// Type of child.
/// It does not strictly define different child type, it can also
/// be related to technical consideration or api variant.
#[repr(u32)]
#[derive(Clone, Copy, PartialEq)]
#[cfg_attr(feature = "std", derive(Debug))]
pub enum ChildType {
/// Default, it uses a cryptographic strong unique id as input.
CryptoUniqueId = 1,
/// If runtime module ensures that the child key is a unique id that will
/// only be used once, its parent key is used as a child trie unique id.
ParentKeyId = 1,
}
impl OwnedChildInfo {
/// Instantiates info for a default child trie.
pub fn new_default(unique_id: Vec<u8>) -> Self {
OwnedChildInfo::Default(OwnedChildTrie {
data: unique_id,
impl ChildType {
/// Try to get a child type from its `u32` representation.
pub fn new(repr: u32) -> Option<ChildType> {
Some(match repr {
r if r == ChildType::ParentKeyId as u32 => ChildType::ParentKeyId,
_ => return None,
})
}
/// Try to update with another instance, return false if both instance
/// are not compatible.
pub fn try_update(&mut self, other: ChildInfo) -> bool {
match self {
OwnedChildInfo::Default(owned_child_trie) => owned_child_trie.try_update(other),
/// Transform a prefixed key into a tuple of the child type
/// and the unprefixed representation of the key.
pub fn from_prefixed_key<'a>(storage_key: &'a PrefixedStorageKey) -> Option<(Self, &'a [u8])> {
let match_type = |storage_key: &'a [u8], child_type: ChildType| {
let prefix = child_type.parent_prefix();
if storage_key.starts_with(prefix) {
Some((child_type, &storage_key[prefix.len()..]))
} else {
None
}
};
match_type(storage_key, ChildType::ParentKeyId)
}
/// Produce a prefixed key for a given child type.
fn new_prefixed_key(&self, key: &[u8]) -> PrefixedStorageKey {
let parent_prefix = self.parent_prefix();
let mut result = Vec::with_capacity(parent_prefix.len() + key.len());
result.extend_from_slice(parent_prefix);
result.extend_from_slice(key);
PrefixedStorageKey(result)
}
/// Prefixes a vec with the prefix for this child type.
fn do_prefix_key(&self, key: &mut Vec<u8>) {
let parent_prefix = self.parent_prefix();
let key_len = key.len();
if parent_prefix.len() > 0 {
key.resize(key_len + parent_prefix.len(), 0);
key.copy_within(..key_len, parent_prefix.len());
key[..parent_prefix.len()].copy_from_slice(parent_prefix);
}
}
/// Get `ChildInfo` reference to this owned child info.
pub fn as_ref(&self) -> ChildInfo {
/// Returns the location reserved for this child trie in their parent trie if there
/// is one.
pub fn parent_prefix(&self) -> &'static [u8] {
match self {
OwnedChildInfo::Default(OwnedChildTrie { data })
=> ChildInfo::Default(ChildTrie {
data: data.as_slice(),
}),
&ChildType::ParentKeyId => DEFAULT_CHILD_TYPE_PARENT_PREFIX,
}
}
}
/// A child trie of default type.
/// Default is the same implementation as the top trie.
/// It share its trie node storage with any kind of key,
/// and its unique id needs to be collision free (eg strong
/// crypto hash).
#[derive(Clone, Copy)]
pub struct ChildTrie<'a> {
/// Data containing unique id.
/// Unique id must but unique and free of any possible key collision
/// (depending on its storage behavior).
data: &'a[u8],
}
/// Owned version of default child trie `ChildTrie`.
/// It uses the same default implementation as the top trie,
/// top trie being a child trie with no keyspace and no storage key.
/// Its keyspace is the variable (unprefixed) part of its storage key.
/// It shares its trie nodes backend storage with every other
/// child trie, so its storage key needs to be a unique id
/// that will be use only once.
/// Those unique id also required to be long enough to avoid any
/// unique id to be prefixed by an other unique id.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))]
pub struct OwnedChildTrie {
/// See `ChildTrie` reference field documentation.
pub struct ChildTrieParentKeyId {
/// Data is the storage key without prefix.
data: Vec<u8>,
}
impl OwnedChildTrie {
impl ChildTrieParentKeyId {
/// Try to update with another instance, return false if both instance
/// are not compatible.
fn try_update(&mut self, other: ChildInfo) -> bool {
fn try_update(&mut self, other: &ChildInfo) -> bool {
match other {
ChildInfo::Default(other) => self.data[..] == other.data[..],
ChildInfo::ParentKeyId(other) => self.data[..] == other.data[..],
}
}
}
const DEFAULT_CHILD_TYPE_PARENT_PREFIX: &'static [u8] = b":child_storage:default:";
#[test]
fn test_prefix_default_child_info() {
let child_info = ChildInfo::new_default(b"any key");
let prefix = child_info.child_type().parent_prefix();
assert!(prefix.starts_with(well_known_keys::CHILD_STORAGE_KEY_PREFIX));
assert!(prefix.starts_with(DEFAULT_CHILD_TYPE_PARENT_PREFIX));
}
+2 -8
View File
@@ -211,9 +211,8 @@ pub fn read_trie_value_with<
Ok(TrieDB::<L>::new(&*db, root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?)
}
/// Determine the default child trie root.
pub fn default_child_trie_root<L: TrieConfiguration>(
_storage_key: &[u8],
/// Determine the empty child trie root.
pub fn empty_child_trie_root<L: TrieConfiguration>(
) -> <L::Hash as Hasher>::Out {
L::trie_root::<_, Vec<u8>, Vec<u8>>(core::iter::empty())
}
@@ -221,7 +220,6 @@ pub fn default_child_trie_root<L: TrieConfiguration>(
/// Determine a child trie root given its ordered contents, closed form. H is the default hasher,
/// but a generic implementation may ignore this type parameter and use other hashers.
pub fn child_trie_root<L: TrieConfiguration, I, A, B>(
_storage_key: &[u8],
input: I,
) -> <L::Hash as Hasher>::Out
where
@@ -235,7 +233,6 @@ pub fn child_trie_root<L: TrieConfiguration, I, A, B>(
/// Determine a child trie root given a hash DB and delta values. H is the default hasher,
/// but a generic implementation may ignore this type parameter and use other hashers.
pub fn child_delta_trie_root<L: TrieConfiguration, I, A, B, DB, RD>(
_storage_key: &[u8],
keyspace: &[u8],
db: &mut DB,
root_data: RD,
@@ -270,7 +267,6 @@ pub fn child_delta_trie_root<L: TrieConfiguration, I, A, B, DB, RD>(
/// Call `f` for all keys in a child trie.
pub fn for_keys_in_child_trie<L: TrieConfiguration, F: FnMut(&[u8]), DB>(
_storage_key: &[u8],
keyspace: &[u8],
db: &DB,
root_slice: &[u8],
@@ -321,7 +317,6 @@ pub fn record_all_keys<L: TrieConfiguration, DB>(
/// Read a value from the child trie.
pub fn read_child_trie_value<L: TrieConfiguration, DB>(
_storage_key: &[u8],
keyspace: &[u8],
db: &DB,
root_slice: &[u8],
@@ -341,7 +336,6 @@ pub fn read_child_trie_value<L: TrieConfiguration, DB>(
/// Read a value from the child trie with given query.
pub fn read_child_trie_value_with<L: TrieConfiguration, Q: Query<L::Hash, Item=DBValue>, DB>(
_storage_key: &[u8],
keyspace: &[u8],
db: &DB,
root_slice: &[u8],
+10 -8
View File
@@ -40,7 +40,7 @@ pub use self::client_ext::{ClientExt, ClientBlockImportExt};
use std::sync::Arc;
use std::collections::HashMap;
use sp_core::storage::{well_known_keys, ChildInfo};
use sp_core::storage::ChildInfo;
use sp_runtime::traits::{Block as BlockT, BlakeTwo256};
use sc_client::LocalCallExecutor;
@@ -66,6 +66,8 @@ impl GenesisInit for () {
pub struct TestClientBuilder<Block: BlockT, Executor, Backend, G: GenesisInit> {
execution_strategies: ExecutionStrategies,
genesis_init: G,
/// The key is an unprefixed storage key, this only contains
/// default child trie content.
child_storage_extension: HashMap<Vec<u8>, StorageChild>,
backend: Arc<Backend>,
_executor: std::marker::PhantomData<Executor>,
@@ -129,17 +131,17 @@ impl<Block: BlockT, Executor, Backend, G: GenesisInit> TestClientBuilder<Block,
/// Extend child storage
pub fn add_child_storage(
mut self,
child_info: &ChildInfo,
key: impl AsRef<[u8]>,
child_key: impl AsRef<[u8]>,
child_info: ChildInfo,
value: impl AsRef<[u8]>,
) -> Self {
let entry = self.child_storage_extension.entry(key.as_ref().to_vec())
let storage_key = child_info.storage_key();
let entry = self.child_storage_extension.entry(storage_key.to_vec())
.or_insert_with(|| StorageChild {
data: Default::default(),
child_info: child_info.to_owned(),
child_info: child_info.clone(),
});
entry.data.insert(child_key.as_ref().to_vec(), value.as_ref().to_vec());
entry.data.insert(key.as_ref().to_vec(), value.as_ref().to_vec());
self
}
@@ -189,8 +191,8 @@ impl<Block: BlockT, Executor, Backend, G: GenesisInit> TestClientBuilder<Block,
// Add some child storage keys.
for (key, child_content) in self.child_storage_extension {
storage.children.insert(
well_known_keys::CHILD_STORAGE_KEY_PREFIX.iter().cloned().chain(key).collect(),
storage.children_default.insert(
key,
StorageChild {
data: child_content.data.into_iter().collect(),
child_info: child_content.child_info,
+12 -9
View File
@@ -123,11 +123,12 @@ impl substrate_test_client::GenesisInit for GenesisParameters {
let mut storage = self.genesis_config().genesis_map();
let child_roots = storage.children.iter().map(|(sk, child_content)| {
let child_roots = storage.children_default.iter().map(|(_sk, child_content)| {
let state_root = <<<runtime::Block as BlockT>::Header as HeaderT>::Hashing as HashT>::trie_root(
child_content.data.clone().into_iter().collect()
);
(sk.clone(), state_root.encode())
let prefixed_storage_key = child_content.child_info.prefixed_storage_key();
(prefixed_storage_key.into_inner(), state_root.encode())
});
let state_root = <<<runtime::Block as BlockT>::Header as HeaderT>::Hashing as HashT>::trie_root(
storage.top.clone().into_iter().chain(child_roots).collect()
@@ -192,22 +193,21 @@ pub trait TestClientBuilderExt<B>: Sized {
/// # Panics
///
/// Panics if the key is empty.
fn add_extra_child_storage<SK: Into<Vec<u8>>, K: Into<Vec<u8>>, V: Into<Vec<u8>>>(
fn add_extra_child_storage<K: Into<Vec<u8>>, V: Into<Vec<u8>>>(
mut self,
storage_key: SK,
child_info: ChildInfo,
child_info: &ChildInfo,
key: K,
value: V,
) -> Self {
let storage_key = storage_key.into();
let storage_key = child_info.storage_key().to_vec();
let key = key.into();
assert!(!storage_key.is_empty());
assert!(!key.is_empty());
self.genesis_init_mut().extra_storage.children
self.genesis_init_mut().extra_storage.children_default
.entry(storage_key)
.or_insert_with(|| StorageChild {
data: Default::default(),
child_info: child_info.to_owned(),
child_info: child_info.clone(),
}).data.insert(key, value.into());
self
}
@@ -311,7 +311,10 @@ impl Fetcher<substrate_test_runtime::Block> for LightFetcher {
unimplemented!()
}
fn remote_read_child(&self, _: RemoteReadChildRequest<substrate_test_runtime::Header>) -> Self::RemoteReadResult {
fn remote_read_child(
&self,
_: RemoteReadChildRequest<substrate_test_runtime::Header>,
) -> Self::RemoteReadResult {
unimplemented!()
}
@@ -73,7 +73,7 @@ impl GenesisConfig {
map.extend(self.extra_storage.top.clone().into_iter());
// Assimilate the system genesis config.
let mut storage = Storage { top: map, children: self.extra_storage.children.clone()};
let mut storage = Storage { top: map, children_default: self.extra_storage.children_default.clone()};
let mut config = system::GenesisConfig::default();
config.authorities = self.authorities.clone();
config.assimilate_storage(&mut storage).expect("Adding `system::GensisConfig` to the genesis");
@@ -85,7 +85,7 @@ impl GenesisConfig {
pub fn insert_genesis_block(
storage: &mut Storage,
) -> sp_core::hash::H256 {
let child_roots = storage.children.iter().map(|(sk, child_content)| {
let child_roots = storage.children_default.iter().map(|(sk, child_content)| {
let state_root = <<<crate::Block as BlockT>::Header as HeaderT>::Hashing as HashT>::trie_root(
child_content.data.clone().into_iter().collect(),
);
+7 -15
View File
@@ -50,7 +50,6 @@ use sp_version::NativeVersion;
use frame_support::{impl_outer_origin, parameter_types, weights::{Weight, RuntimeDbWeight}};
use sp_inherents::{CheckInherentsResult, InherentData};
use cfg_if::cfg_if;
use sp_core::storage::ChildType;
// Ensure Babe and Aura use the same crypto to simplify things a bit.
pub use sp_consensus_babe::{AuthorityId, SlotNumber};
@@ -923,22 +922,17 @@ fn test_read_storage() {
}
fn test_read_child_storage() {
const CHILD_KEY: &[u8] = b":child_storage:default:read_child_storage";
const UNIQUE_ID: &[u8] = b":unique_id";
const STORAGE_KEY: &[u8] = b"unique_id_1";
const KEY: &[u8] = b":read_child_storage";
sp_io::storage::child_set(
CHILD_KEY,
UNIQUE_ID,
ChildType::CryptoUniqueId as u32,
sp_io::default_child_storage::set(
STORAGE_KEY,
KEY,
b"test",
);
let mut v = [0u8; 4];
let r = sp_io::storage::child_read(
CHILD_KEY,
UNIQUE_ID,
ChildType::CryptoUniqueId as u32,
let r = sp_io::default_child_storage::read(
STORAGE_KEY,
KEY,
&mut v,
0,
@@ -947,10 +941,8 @@ fn test_read_child_storage() {
assert_eq!(&v, b"test");
let mut v = [0u8; 4];
let r = sp_io::storage::child_read(
CHILD_KEY,
UNIQUE_ID,
ChildType::CryptoUniqueId as u32,
let r = sp_io::default_child_storage::read(
STORAGE_KEY,
KEY,
&mut v,
8,
+1 -1
View File
@@ -373,7 +373,7 @@ mod tests {
vec![111u8, 0, 0, 0, 0, 0, 0, 0]
}
],
children: map![],
children_default: map![],
},
)
}