Merge branch 'master' into rh-grandpa-dynamic2

This commit is contained in:
Robert Habermeier
2018-11-01 20:31:13 +01:00
57 changed files with 375 additions and 279 deletions
+3
View File
@@ -1,2 +1,5 @@
doc
target
.idea/
Dockerfile
.dockerignore
+1
View File
@@ -15,3 +15,4 @@ node/runtime/wasm/target/
.vscode
polkadot.*
.DS_Store
.idea/
+4 -4
View File
@@ -845,8 +845,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "integer-sqrt"
version = "0.1.0"
source = "git+https://github.com/paritytech/integer-sqrt-rs.git#886e9cb983c46498003878afe965d55caa762025"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "interleaved-ordered"
@@ -2472,7 +2472,7 @@ dependencies = [
name = "sr-primitives"
version = "0.1.0"
dependencies = [
"integer-sqrt 0.1.0 (git+https://github.com/paritytech/integer-sqrt-rs.git)",
"integer-sqrt 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -4232,7 +4232,7 @@ dependencies = [
"checksum hyper 0.12.12 (registry+https://github.com/rust-lang/crates.io-index)" = "4aca412c241a2dd53af261efc7adf7736fdebd67dc0d1cc1ffdbcb9407e0e810"
"checksum idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e"
"checksum indexmap 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "08173ba1e906efb6538785a8844dd496f5d34f0a2d88038e95195172fc667220"
"checksum integer-sqrt 0.1.0 (git+https://github.com/paritytech/integer-sqrt-rs.git)" = "<none>"
"checksum integer-sqrt 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ea155abb3ba6f382a75f1418988c05fe82959ed9ce727de427f9cfd425b0c903"
"checksum interleaved-ordered 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "141340095b15ed7491bd3d4ced9d20cebfb826174b6bb03386381f62b01e3d77"
"checksum iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dbe6e417e7d0975db6512b90796e8ce223145ac4e33c377e4a42882a0e88bb08"
"checksum isatty 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e31a8281fc93ec9693494da65fbf28c0c2aa60a2eaec25dc58e2f31952e95edc"
+16 -17
View File
@@ -1,37 +1,36 @@
FROM phusion/baseimage:0.10.1 as builder
LABEL maintainer "chevdor@gmail.com"
FROM frolvlad/alpine-glibc AS builder
LABEL maintainer="chevdor@gmail.com"
LABEL description="This is the build stage for Substrate. Here we create the binary."
RUN apk add build-base \
cmake \
linux-headers \
openssl-dev && \
apk add --repository http://nl.alpinelinux.org/alpine/edge/community cargo
ARG PROFILE=release
WORKDIR /substrate
COPY . /substrate
RUN apt-get update && \
apt-get upgrade -y && \
apt-get install -y cmake pkg-config libssl-dev git
RUN curl https://sh.rustup.rs -sSf | sh -s -- -y && \
export PATH=$PATH:$HOME/.cargo/bin && \
cargo build --$PROFILE
RUN cargo build --$PROFILE
# ===== SECOND STAGE ======
FROM phusion/baseimage:0.10.0
LABEL maintainer "chevdor@gmail.com"
FROM alpine:3.8
LABEL maintainer="chevdor@gmail.com"
LABEL description="This is the 2nd stage: a very small image where we copy the Substrate binary."
ARG PROFILE=release
COPY --from=builder /substrate/target/$PROFILE/substrate /usr/local/bin
RUN mv /usr/share/ca* /tmp && \
rm -rf /usr/share/* && \
mv /tmp/ca-certificates /usr/share/ && \
rm -rf /usr/lib/python* && \
RUN apk add --no-cache ca-certificates \
libstdc++ \
openssl
RUN rm -rf /usr/lib/python* && \
mkdir -p /root/.local/share/Substrate && \
ln -s /root/.local/share/Substrate /data
RUN rm -rf /usr/bin /usr/sbin
EXPOSE 30333 9933 9944
VOLUME ["/data"]
+11 -3
View File
@@ -118,18 +118,18 @@ Inherent extrinsic knowledge is again somewhat generic, and the actual construct
Substrate Node is Substrate's pre-baked blockchain client. You can run a development node locally or configure a new chain and launch your own global testnet.
=== On Mac
=== On Mac and Ubuntu
To get going as fast as possible, there is a simple script that installs all required dependencies and installs Substrate into your path. Just open a terminal and run:
[source, shell]
----
curl https://raw.githubusercontent.com/paritytech/substrate/master/scripts/getgoing.sh -sSf | sh
curl getsubstrate.io -sSf | bash
----
You can start a local Substrate development chain by running `substrate --dev`.
To create your own global testnet, you'll need to make a new Substrate Node chain specification file ("chainspec").
To create your own global network/cryptocurrency, you'll need to make a new Substrate Node chain specification file ("chainspec").
First let's get a template chainspec that you can edit. We'll use the "staging" chain, a sort of default chain that the node comes pre-configured with:
@@ -147,21 +147,29 @@ Now, edit `~/chainspec.json` in your editor. There are a lot of individual field
},
----
Now with this new chainspec file, you can build a "raw" chain definition for your new chain:
[source, shell]
----
substrate build-spec --chain ~/chainspec.json --raw > ~/mychain.json
----
This can be fed into Substrate:
[source, shell]
----
substrate --chain ~/mychain.json
----
It won't do much until you start producing blocks though, so to do that you'll need to use the `--validator` option together with passing the seed for the account(s) that are configured to be the initial authorities:
[source, shell]
----
substrate --chain ~/mychain.json --validator --key ...
----
You can distribute `mychain.json` so that everyone can synchronise and (depending on your authorities list) validate on your chain.
== Building
+39 -15
View File
@@ -69,7 +69,7 @@ use trie::MemoryDB;
use parking_lot::RwLock;
use primitives::{H256, AuthorityId, Blake2Hasher, ChangesTrieConfiguration};
use primitives::storage::well_known_keys;
use runtime_primitives::{generic::BlockId, Justification};
use runtime_primitives::{generic::BlockId, Justification, StorageMap, ChildrenStorageMap};
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, As, NumberFor, Zero, Digest, DigestItem};
use runtime_primitives::BuildStorage;
use state_machine::backend::Backend as StateBackend;
@@ -104,10 +104,10 @@ pub fn new_client<E, S, Block>(
block_execution_strategy: ExecutionStrategy,
api_execution_strategy: ExecutionStrategy,
) -> Result<client::Client<Backend<Block>, client::LocalCallExecutor<Backend<Block>, E>, Block>, client::error::Error>
where
Block: BlockT,
E: CodeExecutor<Blake2Hasher> + RuntimeInfo,
S: BuildStorage,
where
Block: BlockT<Hash=H256>,
E: CodeExecutor<Blake2Hasher> + RuntimeInfo,
S: BuildStorage,
{
let backend = Arc::new(Backend::new(settings, CANONICALIZATION_DELAY)?);
let executor = client::LocalCallExecutor::new(backend.clone(), executor);
@@ -293,7 +293,7 @@ impl<Block: BlockT, H: Hasher> BlockImportOperation<Block, H> {
impl<Block> client::backend::BlockImportOperation<Block, Blake2Hasher>
for BlockImportOperation<Block, Blake2Hasher>
where Block: BlockT,
where Block: BlockT<Hash=H256>,
{
type State = DbState;
@@ -327,11 +327,33 @@ where Block: BlockT,
Ok(())
}
fn reset_storage<I: Iterator<Item=(Vec<u8>, Vec<u8>)>>(&mut self, iter: I) -> Result<(), client::error::Error> {
fn reset_storage(&mut self, mut top: StorageMap, children: ChildrenStorageMap) -> Result<H256, client::error::Error> {
// TODO: wipe out existing trie.
let (_, update) = self.old_state.storage_root(iter.into_iter().map(|(k, v)| (k, Some(v))));
self.updates = update;
Ok(())
if top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) {
return Err(client::error::ErrorKind::GenesisInvalid.into());
}
let mut transaction: MemoryDB<Blake2Hasher> = Default::default();
for (child_key, child_map) in children {
if !well_known_keys::is_child_storage_key(&child_key) {
return Err(client::error::ErrorKind::GenesisInvalid.into());
}
let (root, is_default, update) = self.old_state.child_storage_root(&child_key, child_map.into_iter().map(|(k, v)| (k, Some(v))));
transaction.consolidate(update);
if !is_default {
top.insert(child_key, root);
}
}
let (root, update) = self.old_state.storage_root(top.into_iter().map(|(k, v)| (k, Some(v))));
transaction.consolidate(update);
self.updates = transaction;
Ok(root)
}
fn update_changes_trie(&mut self, update: MemoryDB<Blake2Hasher>) -> Result<(), client::error::Error> {
@@ -541,7 +563,9 @@ impl<Block: BlockT> Backend<Block> {
transaction: &mut DBTransaction,
f_header: &Block::Header,
f_hash: Block::Hash,
) -> Result<(), client::error::Error> {
) -> Result<(), client::error::Error> where
Block: BlockT<Hash=H256>,
{
let meta = self.blockchain.meta.read();
let f_num = f_header.number().clone();
@@ -587,7 +611,7 @@ fn apply_state_commit(transaction: &mut DBTransaction, commit: state_db::CommitS
}
}
impl<Block> client::backend::Backend<Block, Blake2Hasher> for Backend<Block> where Block: BlockT {
impl<Block> client::backend::Backend<Block, Blake2Hasher> for Backend<Block> where Block: BlockT<Hash=H256> {
type BlockImportOperation = BlockImportOperation<Block, Blake2Hasher>;
type Blockchain = BlockchainDb<Block>;
type State = DbState;
@@ -881,7 +905,7 @@ impl<Block> client::backend::Backend<Block, Blake2Hasher> for Backend<Block> whe
}
impl<Block> client::backend::LocalBackend<Block, Blake2Hasher> for Backend<Block>
where Block: BlockT {}
where Block: BlockT<Hash=H256> {}
#[cfg(test)]
mod tests {
@@ -1023,7 +1047,7 @@ mod tests {
).0.into();
let hash = header.hash();
op.reset_storage(storage.iter().cloned()).unwrap();
op.reset_storage(storage.iter().cloned().collect(), Default::default()).unwrap();
op.set_block_data(
header.clone(),
Some(vec![]),
@@ -1102,7 +1126,7 @@ mod tests {
).0.into();
let hash = header.hash();
op.reset_storage(storage.iter().cloned()).unwrap();
op.reset_storage(storage.iter().cloned().collect(), Default::default()).unwrap();
key = op.updates.insert(b"hello");
op.set_block_data(
+6 -9
View File
@@ -18,7 +18,7 @@
use error;
use primitives::AuthorityId;
use runtime_primitives::{generic::BlockId, Justification};
use runtime_primitives::{generic::BlockId, Justification, StorageMap, ChildrenStorageMap};
use runtime_primitives::traits::{Block as BlockT, NumberFor};
use state_machine::backend::Backend as StateBackend;
use state_machine::ChangesTrieStorage as StateChangesTrieStorage;
@@ -50,8 +50,7 @@ impl NewBlockState {
pub trait BlockImportOperation<Block, H>
where
Block: BlockT,
H: Hasher,
H: Hasher<Out=Block::Hash>,
{
/// Associated state backend type.
type State: StateBackend<H>;
@@ -73,7 +72,7 @@ where
/// Inject storage data into the database.
fn update_storage(&mut self, update: <Self::State as StateBackend<H>>::Transaction) -> error::Result<()>;
/// Inject storage data into the database replacing any existing data.
fn reset_storage<I: Iterator<Item=(Vec<u8>, Vec<u8>)>>(&mut self, iter: I) -> error::Result<()>;
fn reset_storage(&mut self, top: StorageMap, children: ChildrenStorageMap) -> error::Result<H::Out>;
/// Inject changes trie data into the database.
fn update_changes_trie(&mut self, update: MemoryDB<H>) -> error::Result<()>;
/// Update auxiliary keys. Values are `None` if should be deleted.
@@ -92,7 +91,7 @@ where
pub trait Backend<Block, H>: Send + Sync
where
Block: BlockT,
H: Hasher,
H: Hasher<Out=Block::Hash>,
{
/// Associated block insertion operation type.
@@ -131,14 +130,12 @@ where
pub trait LocalBackend<Block, H>: Backend<Block, H>
where
Block: BlockT,
H: Hasher,
H: Hasher<Out=Block::Hash>,
{}
/// Mark for all Backend implementations, that are fetching required state data from remote nodes.
pub trait RemoteBackend<Block, H>: Backend<Block, H>
where
Block: BlockT,
H: Hasher,
H: Hasher<Out=Block::Hash>,
{}
+3 -3
View File
@@ -25,7 +25,7 @@ use runtime_primitives::generic::BlockId;
use runtime_api::BlockBuilder as BlockBuilderAPI;
use {backend, error, Client, CallExecutor};
use runtime_primitives::ApplyOutcome;
use primitives::{Blake2Hasher};
use primitives::{Blake2Hasher, H256};
use hash_db::Hasher;
/// Utility for building new (valid) blocks from a stream of extrinsics.
@@ -34,7 +34,7 @@ where
B: backend::Backend<Block, H> + 'a,
E: CallExecutor<Block, H> + Clone + 'a,
Block: BlockT,
H: Hasher,
H: Hasher<Out=Block::Hash>,
H::Out: Ord,
{
@@ -50,7 +50,7 @@ impl<'a, B, E, Block> BlockBuilder<'a, B, E, Block, Blake2Hasher>
where
B: backend::Backend<Block, Blake2Hasher> + 'a,
E: CallExecutor<Block, Blake2Hasher> + Clone + 'a,
Block: BlockT,
Block: BlockT<Hash=H256>,
{
/// Create a new instance of builder from the given client, building on the latest block.
pub fn new(client: &'a Client<B, E, Block>) -> error::Result<Self> {
+3 -3
View File
@@ -24,7 +24,7 @@ use executor::{RuntimeVersion, RuntimeInfo, NativeVersion};
use hash_db::Hasher;
use trie::MemoryDB;
use codec::Decode;
use primitives::{Blake2Hasher};
use primitives::{H256, Blake2Hasher};
use primitives::storage::well_known_keys;
use backend;
@@ -43,7 +43,7 @@ pub struct CallResult {
pub trait CallExecutor<B, H>
where
B: BlockT,
H: Hasher,
H: Hasher<Out=B::Hash>,
H::Out: Ord,
{
@@ -119,7 +119,7 @@ impl<B, E, Block> CallExecutor<Block, Blake2Hasher> for LocalCallExecutor<B, E>
where
B: backend::LocalBackend<Block, Blake2Hasher>,
E: CodeExecutor<Blake2Hasher> + RuntimeInfo,
Block: BlockT,
Block: BlockT<Hash=H256>,
{
type Error = E::Error;
+29 -31
View File
@@ -184,11 +184,10 @@ pub fn new_in_mem<E, Block, S>(
executor: E,
genesis_storage: S,
) -> error::Result<Client<in_mem::Backend<Block, Blake2Hasher>, LocalCallExecutor<in_mem::Backend<Block, Blake2Hasher>, E>, Block>>
where
E: CodeExecutor<Blake2Hasher> + RuntimeInfo,
S: BuildStorage,
Block: BlockT,
H256: From<Block::Hash>,
where
E: CodeExecutor<Blake2Hasher> + RuntimeInfo,
S: BuildStorage,
Block: BlockT<Hash=H256>,
{
new_with_backend(Arc::new(in_mem::Backend::new()), executor, genesis_storage)
}
@@ -200,12 +199,11 @@ pub fn new_with_backend<B, E, Block, S>(
executor: E,
build_genesis_storage: S,
) -> error::Result<Client<B, LocalCallExecutor<B, E>, Block>>
where
E: CodeExecutor<Blake2Hasher> + RuntimeInfo,
S: BuildStorage,
Block: BlockT,
H256: From<Block::Hash>,
B: backend::LocalBackend<Block, Blake2Hasher>
where
E: CodeExecutor<Blake2Hasher> + RuntimeInfo,
S: BuildStorage,
Block: BlockT<Hash=H256>,
B: backend::LocalBackend<Block, Blake2Hasher>
{
let call_executor = LocalCallExecutor::new(backend.clone(), executor);
Client::new(backend, call_executor, build_genesis_storage, ExecutionStrategy::NativeWhenPossible, ExecutionStrategy::NativeWhenPossible)
@@ -214,7 +212,7 @@ pub fn new_with_backend<B, E, Block, S>(
impl<B, E, Block> Client<B, E, Block> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT,
Block: BlockT<Hash=H256>,
{
/// Creates new Substrate Client with given blockchain and code executor.
pub fn new<S: BuildStorage>(
@@ -225,11 +223,12 @@ impl<B, E, Block> Client<B, E, Block> where
api_execution_strategy: ExecutionStrategy,
) -> error::Result<Self> {
if backend.blockchain().header(BlockId::Number(Zero::zero()))?.is_none() {
let genesis_storage = build_genesis_storage.build_storage()?;
let genesis_block = genesis::construct_genesis_block::<Block>(&genesis_storage);
info!("Initialising Genesis block/state (state: {}, header-hash: {})", genesis_block.header().state_root(), genesis_block.header().hash());
let (genesis_storage, children_genesis_storage) = build_genesis_storage.build_storage()?;
let mut op = backend.begin_operation(BlockId::Hash(Default::default()))?;
op.reset_storage(genesis_storage.into_iter())?;
let state_root = op.reset_storage(genesis_storage, children_genesis_storage)?;
let genesis_block = genesis::construct_genesis_block::<Block>(state_root.into());
info!("Initialising Genesis block/state (state: {}, header-hash: {})", genesis_block.header().state_root(), genesis_block.header().hash());
op.set_block_data(
genesis_block.deconstruct().0,
Some(vec![]),
@@ -900,7 +899,7 @@ impl<B, E, Block> Client<B, E, Block> where
impl<B, E, Block> consensus::BlockImport<Block> for Client<B, E, Block> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher> + Clone,
Block: BlockT,
Block: BlockT<Hash=H256>,
{
type Error = Error;
@@ -967,7 +966,7 @@ impl<B, E, Block> consensus::BlockImport<Block> for Client<B, E, Block> where
impl<B, E, Block> consensus::Authorities<Block> for Client<B, E, Block> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher> + Clone,
Block: BlockT,
Block: BlockT<Hash=H256>,
{
type Error = Error;
fn authorities(&self, at: &BlockId<Block>) -> Result<Vec<AuthorityId>, Self::Error> {
@@ -978,7 +977,7 @@ impl<B, E, Block> consensus::Authorities<Block> for Client<B, E, Block> where
impl<B, E, Block> CurrentHeight for Client<B, E, Block> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher> + Clone,
Block: BlockT,
Block: BlockT<Hash=H256>,
{
type BlockNumber = <Block::Header as HeaderT>::Number;
fn current_height(&self) -> Self::BlockNumber {
@@ -989,7 +988,7 @@ impl<B, E, Block> CurrentHeight for Client<B, E, Block> where
impl<B, E, Block> BlockNumberToHash for Client<B, E, Block> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher> + Clone,
Block: BlockT,
Block: BlockT<Hash=H256>,
{
type BlockNumber = <Block::Header as HeaderT>::Number;
type Hash = Block::Hash;
@@ -1002,7 +1001,7 @@ impl<B, E, Block> BlockNumberToHash for Client<B, E, Block> where
impl<B, E, Block> BlockchainEvents<Block> for Client<B, E, Block>
where
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT,
Block: BlockT<Hash=H256>,
{
/// Get block import event stream.
fn import_notification_stream(&self) -> ImportNotifications<Block> {
@@ -1027,18 +1026,17 @@ impl<B, E, Block> ChainHead<Block> for Client<B, E, Block>
where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT,
Block: BlockT<Hash=H256>,
{
fn best_block_header(&self) -> error::Result<<Block as BlockT>::Header> {
Client::best_block_header(self)
}
}
impl<B, E, Block> BlockBody<Block> for Client<B, E, Block>
where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT,
impl<B, E, Block> BlockBody<Block> for Client<B, E, Block> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT<Hash=H256>,
{
fn block_body(&self, id: &BlockId<Block>) -> error::Result<Option<Vec<<Block as BlockT>::Extrinsic>>> {
self.body(id)
@@ -1048,7 +1046,7 @@ impl<B, E, Block> BlockBody<Block> for Client<B, E, Block>
impl<B, E, Block> api::Core<Block, AuthorityId> for Client<B, E, Block> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT,
Block: BlockT<Hash=H256>,
{
type Error = Error;
@@ -1068,7 +1066,7 @@ impl<B, E, Block> api::Core<Block, AuthorityId> for Client<B, E, Block> where
impl<B, E, Block> api::Metadata<Block, Vec<u8>> for Client<B, E, Block> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT,
Block: BlockT<Hash=H256>,
{
type Error = Error;
@@ -1080,7 +1078,7 @@ impl<B, E, Block> api::Metadata<Block, Vec<u8>> for Client<B, E, Block> where
impl<B, E, Block> api::BlockBuilder<Block> for Client<B, E, Block> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT,
Block: BlockT<Hash=H256>,
{
type Error = Error;
type OverlayedChanges = OverlayedChanges;
@@ -1134,7 +1132,7 @@ impl<B, E, Block> api::BlockBuilder<Block> for Client<B, E, Block> where
impl<B, E, Block> api::TaggedTransactionQueue<Block> for Client<B, E, Block> where
B: backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
Block: BlockT,
Block: BlockT<Hash=H256>,
{
type Error = Error;
+6
View File
@@ -70,6 +70,12 @@ error_chain! {
display("On-chain runtime does not specify version"),
}
/// Genesis config is invalid.
GenesisInvalid {
description("Genesis config error"),
display("Genesis config provided is invalid"),
}
/// Bad justification for header.
BadJustification(h: String) {
description("bad justification for header"),
+8 -6
View File
@@ -17,15 +17,13 @@
//! Tool for creating the genesis block.
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Hash as HashT, Zero};
use runtime_primitives::StorageMap;
/// Create a genesis block, given the initial storage.
pub fn construct_genesis_block<
Block: BlockT
> (
storage: &StorageMap
state_root: Block::Hash
) -> Block {
let state_root = <<<Block as BlockT>::Header as HeaderT>::Hashing as HashT>::trie_root(storage.clone().into_iter());
let extrinsics_root = <<<Block as BlockT>::Header as HeaderT>::Hashing as HashT>::trie_root(::std::iter::empty::<(&[u8], &[u8])>());
Block::new(
<<Block as BlockT>::Header as HeaderT>::new(
@@ -50,6 +48,7 @@ mod tests {
use test_client;
use test_client::runtime::genesismap::{GenesisConfig, additional_storage_with_genesis};
use test_client::runtime::{Hash, Transfer, Block, BlockNumber, Header, Digest, Extrinsic};
use runtime_primitives::traits::BlakeTwo256;
use primitives::{Blake2Hasher, ed25519::{Public, Pair}};
native_executor_instance!(Executor, test_client::runtime::api::dispatch, test_client::runtime::native_version, include_bytes!("../../test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm"));
@@ -144,7 +143,8 @@ mod tests {
let mut storage = GenesisConfig::new_simple(
vec![Keyring::One.to_raw_public().into(), Keyring::Two.to_raw_public().into()], 1000
).genesis_map();
let block = construct_genesis_block::<Block>(&storage);
let state_root = BlakeTwo256::trie_root(storage.clone().into_iter());
let block = construct_genesis_block::<Block>(state_root);
let genesis_hash = block.header.hash();
storage.extend(additional_storage_with_genesis(&block).into_iter());
@@ -168,7 +168,8 @@ mod tests {
let mut storage = GenesisConfig::new_simple(
vec![Keyring::One.to_raw_public().into(), Keyring::Two.to_raw_public().into()], 1000
).genesis_map();
let block = construct_genesis_block::<Block>(&storage);
let state_root = BlakeTwo256::trie_root(storage.clone().into_iter());
let block = construct_genesis_block::<Block>(state_root);
let genesis_hash = block.header.hash();
storage.extend(additional_storage_with_genesis(&block).into_iter());
@@ -193,7 +194,8 @@ mod tests {
let mut storage = GenesisConfig::new_simple(
vec![Keyring::One.to_raw_public().into(), Keyring::Two.to_raw_public().into()], 68
).genesis_map();
let block = construct_genesis_block::<Block>(&storage);
let state_root = BlakeTwo256::trie_root(storage.clone().into_iter());
let block = construct_genesis_block::<Block>(state_root);
let genesis_hash = block.header.hash();
storage.extend(additional_storage_with_genesis(&block).into_iter());
+38 -18
View File
@@ -22,13 +22,13 @@ use parking_lot::RwLock;
use error;
use backend::{self, NewBlockState};
use light;
use primitives::AuthorityId;
use primitives::{AuthorityId, storage::well_known_keys};
use runtime_primitives::generic::BlockId;
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Zero,
NumberFor, As, Digest, DigestItem};
use runtime_primitives::Justification;
use runtime_primitives::{Justification, StorageMap, ChildrenStorageMap};
use blockchain::{self, BlockStatus, HeaderBackend};
use state_machine::backend::{Backend as StateBackend, InMemory};
use state_machine::backend::{Backend as StateBackend, InMemory, Consolidate};
use state_machine::InMemoryChangesTrieStorage;
use hash_db::Hasher;
use heapsize::HeapSizeOf;
@@ -376,9 +376,9 @@ pub struct BlockImportOperation<Block: BlockT, H: Hasher> {
impl<Block, H> backend::BlockImportOperation<Block, H> for BlockImportOperation<Block, H>
where
Block: BlockT,
H: Hasher,
H: Hasher<Out=Block::Hash>,
H::Out: HeapSizeOf,
H::Out: HeapSizeOf + Ord,
{
type State = InMemory<H>;
@@ -415,9 +415,31 @@ where
Ok(())
}
fn reset_storage<I: Iterator<Item=(Vec<u8>, Vec<u8>)>>(&mut self, iter: I) -> error::Result<()> {
self.new_state = Some(InMemory::from(iter.collect::<HashMap<_, _>>()));
Ok(())
fn reset_storage(&mut self, mut top: StorageMap, children: ChildrenStorageMap) -> error::Result<H::Out> {
if top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) {
return Err(error::ErrorKind::GenesisInvalid.into());
}
let mut transaction: Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)> = Default::default();
for (child_key, child_map) in children {
if !well_known_keys::is_child_storage_key(&child_key) {
return Err(error::ErrorKind::GenesisInvalid.into());
}
let (root, is_default, update) = self.old_state.child_storage_root(&child_key, child_map.into_iter().map(|(k, v)| (k, Some(v))));
transaction.consolidate(update);
if !is_default {
top.insert(child_key, root);
}
}
let (root, update) = self.old_state.storage_root(top.into_iter().map(|(k, v)| (k, Some(v))));
transaction.consolidate(update);
self.new_state = Some(InMemory::from(transaction));
Ok(root)
}
fn set_aux<I>(&mut self, ops: I) -> error::Result<()>
@@ -432,9 +454,8 @@ where
pub struct Backend<Block, H>
where
Block: BlockT,
H: Hasher,
H::Out: HeapSizeOf + From<Block::Hash>,
H: Hasher<Out=Block::Hash>,
H::Out: HeapSizeOf + Ord,
{
states: RwLock<HashMap<Block::Hash, InMemory<H>>>,
changes_trie_storage: InMemoryChangesTrieStorage<H>,
@@ -444,9 +465,8 @@ where
impl<Block, H> Backend<Block, H>
where
Block: BlockT,
H: Hasher,
H::Out: HeapSizeOf + From<Block::Hash>,
H: Hasher<Out=Block::Hash>,
H::Out: HeapSizeOf + Ord,
{
/// Create a new instance of in-mem backend.
pub fn new() -> Backend<Block, H> {
@@ -461,8 +481,8 @@ where
impl<Block, H> backend::Backend<Block, H> for Backend<Block, H>
where
Block: BlockT,
H: Hasher,
H::Out: HeapSizeOf + From<Block::Hash>,
H: Hasher<Out=Block::Hash>,
H::Out: HeapSizeOf + Ord,
{
type BlockImportOperation = BlockImportOperation<Block, H>;
type Blockchain = Blockchain<Block>;
@@ -558,8 +578,8 @@ where
impl<Block, H> backend::LocalBackend<Block, H> for Backend<Block, H>
where
Block: BlockT,
H: Hasher,
H::Out: HeapSizeOf + From<Block::Hash>,
H: Hasher<Out=Block::Hash>,
H::Out: HeapSizeOf + Ord,
{}
impl<Block: BlockT> Cache<Block> {
+19 -20
View File
@@ -22,10 +22,11 @@ use futures::{Future, IntoFuture};
use parking_lot::RwLock;
use primitives::AuthorityId;
use runtime_primitives::{generic::BlockId, Justification};
use runtime_primitives::{generic::BlockId, Justification, StorageMap, ChildrenStorageMap};
use state_machine::{Backend as StateBackend, InMemoryChangesTrieStorage, TrieBackend};
use runtime_primitives::traits::{Block as BlockT, NumberFor};
use in_mem;
use backend::{Backend as ClientBackend, BlockImportOperation, RemoteBackend, NewBlockState};
use blockchain::HeaderBackend as BlockchainHeaderBackend;
use error::{Error as ClientError, ErrorKind as ClientErrorKind, Result as ClientResult};
@@ -73,9 +74,8 @@ impl<S, F, Block, H> ClientBackend<Block, H> for Backend<S, F> where
Block: BlockT,
S: BlockchainStorage<Block>,
F: Fetcher<Block>,
H: Hasher,
H::Out: HeapSizeOf,
H: Hasher<Out=Block::Hash>,
H::Out: HeapSizeOf + Ord,
{
type BlockImportOperation = ImportOperation<Block, S, F>;
type Blockchain = Blockchain<S, F>;
@@ -146,9 +146,8 @@ where
Block: BlockT,
S: BlockchainStorage<Block>,
F: Fetcher<Block>,
H: Hasher,
H::Out: HeapSizeOf,
H: Hasher<Out=Block::Hash>,
H::Out: HeapSizeOf + Ord,
{}
impl<S, F, Block, H> BlockImportOperation<Block, H> for ImportOperation<Block, S, F>
@@ -156,8 +155,8 @@ where
Block: BlockT,
F: Fetcher<Block>,
S: BlockchainStorage<Block>,
H: Hasher,
H: Hasher<Out=Block::Hash>,
H::Out: HeapSizeOf + Ord,
{
type State = OnDemandState<Block, S, F>;
@@ -192,9 +191,10 @@ where
Ok(())
}
fn reset_storage<I: Iterator<Item=(Vec<u8>, Vec<u8>)>>(&mut self, _iter: I) -> ClientResult<()> {
// we're not storing anything locally => ignore changes
Ok(())
fn reset_storage(&mut self, top: StorageMap, children: ChildrenStorageMap) -> ClientResult<H::Out> {
let in_mem = in_mem::Backend::<Block, H>::new();
let mut op = in_mem.begin_operation(BlockId::Hash(Default::default()))?;
op.reset_storage(top, children)
}
fn set_aux<I>(&mut self, ops: I) -> ClientResult<()>
@@ -206,12 +206,11 @@ where
}
impl<Block, S, F, H> StateBackend<H> for OnDemandState<Block, S, F>
where
Block: BlockT,
S: BlockchainStorage<Block>,
F: Fetcher<Block>,
H: Hasher,
where
Block: BlockT,
S: BlockchainStorage<Block>,
F: Fetcher<Block>,
H: Hasher<Out=Block::Hash>,
{
type Error = ClientError;
type Transaction = ();
@@ -256,11 +255,11 @@ impl<Block, S, F, H> StateBackend<H> for OnDemandState<Block, S, F>
(H::Out::default(), ())
}
fn child_storage_root<I>(&self, _key: &[u8], _delta: I) -> (Vec<u8>, Self::Transaction)
fn child_storage_root<I>(&self, _key: &[u8], _delta: I) -> (Vec<u8>, bool, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>
{
(H::Out::default().as_ref().to_vec(), ())
(H::Out::default().as_ref().to_vec(), true, ())
}
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
@@ -66,9 +66,8 @@ where
Block: BlockT,
B: ChainBackend<Block>,
F: Fetcher<Block>,
H: Hasher,
H::Out: Ord,
H: Hasher<Out=Block::Hash>,
Block::Hash: Ord,
{
type Error = ClientError;
@@ -134,7 +133,7 @@ pub fn check_execution_proof<Header, E, H>(
E: CodeExecutor<H>,
H: Hasher,
H::Out: Ord + HeapSizeOf,
{
let local_state_root = request.header.state_root();
let mut root: H::Out = Default::default();
+8 -7
View File
@@ -23,7 +23,7 @@ pub mod fetcher;
use std::sync::Arc;
use primitives::{Blake2Hasher};
use primitives::{H256, Blake2Hasher};
use runtime_primitives::BuildStorage;
use runtime_primitives::traits::Block as BlockT;
use state_machine::{CodeExecutor, ExecutionStrategy};
@@ -53,11 +53,12 @@ pub fn new_light<B, S, F, GS>(
fetcher: Arc<F>,
genesis_storage: GS,
) -> ClientResult<Client<Backend<S, F>, RemoteCallExecutor<Blockchain<S, F>, F, Blake2Hasher>, B>>
where
B: BlockT,
S: BlockchainStorage<B>,
F: Fetcher<B>,
GS: BuildStorage,
where
B: BlockT<Hash=H256>,
S: BlockchainStorage<B>,
F: Fetcher<B>,
GS: BuildStorage,
{
let executor = RemoteCallExecutor::new(backend.blockchain().clone(), fetcher);
Client::new(backend, executor, genesis_storage, ExecutionStrategy::NativeWhenPossible, ExecutionStrategy::NativeWhenPossible)
@@ -70,7 +71,7 @@ pub fn new_fetch_checker<E, H>(
where
E: CodeExecutor<H>,
H: Hasher,
{
LightDataChecker::new(executor)
}
+11 -11
View File
@@ -57,7 +57,7 @@ use runtime_primitives::traits::{
NumberFor, Block as BlockT, Header as HeaderT, DigestFor,
};
use runtime_primitives::generic::BlockId;
use substrate_primitives::{ed25519, AuthorityId, Blake2Hasher};
use substrate_primitives::{ed25519, H256, AuthorityId, Blake2Hasher};
use tokio::timer::Interval;
use grandpa::Error as GrandpaError;
@@ -161,7 +161,7 @@ pub trait BlockStatus<Block: BlockT> {
fn block_number(&self, hash: Block::Hash) -> Result<Option<NumberFor<Block>>, Error>;
}
impl<B, E, Block: BlockT> BlockStatus<Block> for Arc<Client<B, E, Block>> where
impl<B, E, Block: BlockT<Hash=H256>> BlockStatus<Block> for Arc<Client<B, E, Block>> where
B: Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
NumberFor<Block>: BlockNumberOps,
@@ -445,7 +445,7 @@ struct Environment<B, E, Block: BlockT, N: Network> {
set_id: u64,
}
impl<Block: BlockT, B, E, N> grandpa::Chain<Block::Hash, NumberFor<Block>> for Environment<B, E, Block, N> where
impl<Block: BlockT<Hash=H256>, B, E, N> grandpa::Chain<Block::Hash, NumberFor<Block>> for Environment<B, E, Block, N> where
Block: 'static,
B: Backend<Block, Blake2Hasher> + 'static,
E: CallExecutor<Block, Blake2Hasher> + 'static,
@@ -541,7 +541,7 @@ impl<H, N> From<grandpa::Error> for ExitOrError<H, N> {
}
}
impl<B, E, Block: BlockT, N> voter::Environment<Block::Hash, NumberFor<Block>> for Environment<B, E, Block, N> where
impl<B, E, Block: BlockT<Hash=H256>, N> voter::Environment<Block::Hash, NumberFor<Block>> for Environment<B, E, Block, N> where
Block: 'static,
B: Backend<Block, Blake2Hasher> + 'static,
E: CallExecutor<Block, Blake2Hasher> + 'static,
@@ -730,7 +730,7 @@ impl<B, E, Block: BlockT, N> voter::Environment<Block::Hash, NumberFor<Block>> f
}
/// Client side of the GRANDPA APIs declared in fg-primitives.
pub trait ApiClient<Block: BlockT> {
pub trait ApiClient<Block: BlockT<Hash=H256>> {
/// Get the genesis authorities for GRANDPA.
fn genesis_authorities(&self) -> Result<Vec<(AuthorityId, u64)>, ClientError>;
@@ -739,7 +739,7 @@ pub trait ApiClient<Block: BlockT> {
-> Result<Option<ScheduledChange<NumberFor<Block>>>, ClientError>;
}
impl<B, E, Block: BlockT> ApiClient<Block> for Arc<Client<B, E, Block>> where
impl<B, E, Block: BlockT<Hash=H256>> ApiClient<Block> for Arc<Client<B, E, Block>> where
B: Backend<Block, Blake2Hasher> + 'static,
E: CallExecutor<Block, Blake2Hasher> + 'static + Clone,
DigestFor<Block>: Encode,
@@ -770,13 +770,13 @@ impl<B, E, Block: BlockT> ApiClient<Block> for Arc<Client<B, E, Block>> where
/// This scans each imported block for signals of changing authority set.
/// When using GRANDPA, the block import worker should be using this block import
/// object.
pub struct GrandpaBlockImport<B, E, Block: BlockT, Api> {
pub struct GrandpaBlockImport<B, E, Block: BlockT<Hash=H256>, Api> {
inner: Arc<Client<B, E, Block>>,
authority_set: SharedAuthoritySet<Block::Hash, NumberFor<Block>>,
api_client: Api,
}
impl<B, E, Block: BlockT, Api> BlockImport<Block> for GrandpaBlockImport<B, E, Block, Api> where
impl<B, E, Block: BlockT<Hash=H256>, Api> BlockImport<Block> for GrandpaBlockImport<B, E, Block, Api> where
B: Backend<Block, Blake2Hasher> + 'static,
E: CallExecutor<Block, Blake2Hasher> + 'static + Clone,
DigestFor<Block>: Encode,
@@ -825,14 +825,14 @@ impl<B, E, Block: BlockT, Api> BlockImport<Block> for GrandpaBlockImport<B, E, B
/// Half of a link between a block-import worker and a the background voter.
// This should remain non-clone.
pub struct LinkHalf<B, E, Block: BlockT> {
pub struct LinkHalf<B, E, Block: BlockT<Hash=H256>> {
client: Arc<Client<B, E, Block>>,
authority_set: SharedAuthoritySet<Block::Hash, NumberFor<Block>>,
}
/// Make block importer and link half necessary to tie the background voter
/// to it.
pub fn block_import<B, E, Block: BlockT, Api>(client: Arc<Client<B, E, Block>>, api_client: Api)
pub fn block_import<B, E, Block: BlockT<Hash=H256>, Api>(client: Arc<Client<B, E, Block>>, api_client: Api)
-> Result<(GrandpaBlockImport<B, E, Block, Api>, LinkHalf<B, E, Block>), ClientError>
where
B: Backend<Block, Blake2Hasher> + 'static,
@@ -870,7 +870,7 @@ pub fn block_import<B, E, Block: BlockT, Api>(client: Arc<Client<B, E, Block>>,
/// Run a GRANDPA voter as a task. Provide configuration and a link to a
/// block import worker that has already been instantiated with `block_import`.
pub fn run_grandpa<B, E, Block: BlockT, N>(
pub fn run_grandpa<B, E, Block: BlockT<Hash=H256>, N>(
config: Config,
link: LinkHalf<B, E, Block>,
network: N,
+2 -2
View File
@@ -23,7 +23,7 @@ use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor};
use runtime_primitives::generic::{BlockId};
use consensus::{ImportBlock, ImportResult};
use runtime_primitives::Justification;
use primitives::{Blake2Hasher, AuthorityId};
use primitives::{H256, Blake2Hasher, AuthorityId};
/// Local client abstraction for the network.
pub trait Client<Block: BlockT>: Send + Sync {
@@ -72,7 +72,7 @@ impl<B, E, Block> Client<Block> for SubstrateClient<B, E, Block> where
B: client::backend::Backend<Block, Blake2Hasher> + Send + Sync + 'static,
E: CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static,
Self: BlockImport<Block, Error=Error>,
Block: BlockT
Block: BlockT<Hash=H256>,
{
fn import(&self, block: ImportBlock<Block>, new_authorities: Option<Vec<AuthorityId>>)
-> Result<ImportResult, Error>
+1 -1
View File
@@ -117,7 +117,7 @@ pub struct Service<B: BlockT + 'static, S: Specialization<B>, H: ExHashT> {
handler: Arc<Protocol<B, S, H>>,
/// Protocol ID.
protocol_id: ProtocolId,
/// Sender for messages to the backgound service task, and handle for the background thread.
/// Sender for messages to the background service task, and handle for the background thread.
/// Dropping the sender should close the task and the thread.
/// This is an `Option` because we need to extract it in the destructor.
bg_thread: Option<(oneshot::Sender<()>, thread::JoinHandle<()>)>,
+2 -1
View File
@@ -33,7 +33,7 @@ use transaction_pool::{
};
use jsonrpc_macros::pubsub;
use jsonrpc_pubsub::SubscriptionId;
use primitives::{Bytes, Blake2Hasher};
use primitives::{Bytes, Blake2Hasher, H256};
use rpc::futures::{Sink, Stream, Future};
use runtime_primitives::{generic, traits};
use subscriptions::Subscriptions;
@@ -107,6 +107,7 @@ impl<B, E, P> AuthorApi<ExHash<P>, BlockHash<P>, ExtrinsicFor<P>, Vec<ExtrinsicF
B: client::backend::Backend<<P as PoolChainApi>::Block, Blake2Hasher> + Send + Sync + 'static,
E: client::CallExecutor<<P as PoolChainApi>::Block, Blake2Hasher> + Send + Sync + 'static,
P: PoolChainApi + Sync + Send + 'static,
P::Block: traits::Block<Hash=H256>,
P::Error: 'static,
{
type Metadata = ::metadata::Metadata;
+3 -2
View File
@@ -23,6 +23,7 @@ use jsonrpc_macros::{pubsub, Trailing};
use jsonrpc_pubsub::SubscriptionId;
use rpc::Result as RpcResult;
use rpc::futures::{stream, Future, Sink, Stream};
use primitives::H256;
use runtime_primitives::generic::{BlockId, SignedBlock};
use runtime_primitives::traits::{Block as BlockT, Header, NumberFor};
use runtime_version::RuntimeVersion;
@@ -100,7 +101,7 @@ impl<B, E, Block: BlockT> Chain<B, E, Block> {
}
impl<B, E, Block> Chain<B, E, Block> where
Block: BlockT + 'static,
Block: BlockT<Hash=H256> + 'static,
B: client::backend::Backend<Block, Blake2Hasher> + Send + Sync + 'static,
E: client::CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static,
{
@@ -113,7 +114,7 @@ impl<B, E, Block> Chain<B, E, Block> where
}
impl<B, E, Block> ChainApi<Block::Hash, Block::Header, NumberFor<Block>, Block::Extrinsic> for Chain<B, E, Block> where
Block: BlockT + 'static,
Block: BlockT<Hash=H256> + 'static,
B: client::backend::Backend<Block, Blake2Hasher> + Send + Sync + 'static,
E: client::CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static,
{
+3 -2
View File
@@ -25,6 +25,7 @@ use client::{self, Client, CallExecutor, BlockchainEvents, runtime_api::Metadata
use jsonrpc_macros::Trailing;
use jsonrpc_macros::pubsub;
use jsonrpc_pubsub::SubscriptionId;
use primitives::H256;
use primitives::hexdisplay::HexDisplay;
use primitives::storage::{StorageKey, StorageData, StorageChangeSet};
use primitives::{Blake2Hasher, Bytes};
@@ -104,7 +105,7 @@ impl<B, E, Block: BlockT> State<B, E, Block> {
}
impl<B, E, Block> State<B, E, Block> where
Block: BlockT,
Block: BlockT<Hash=H256>,
B: client::backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
{
@@ -114,7 +115,7 @@ impl<B, E, Block> State<B, E, Block> where
}
impl<B, E, Block> StateApi<Block::Hash> for State<B, E, Block> where
Block: BlockT + 'static,
Block: BlockT<Hash=H256> + 'static,
B: client::backend::Backend<Block, Blake2Hasher> + Send + Sync + 'static,
E: CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static,
{
+4 -4
View File
@@ -20,7 +20,7 @@ use std::collections::HashMap;
use std::fs::File;
use std::path::PathBuf;
use primitives::storage::{StorageKey, StorageData};
use runtime_primitives::{BuildStorage, StorageMap};
use runtime_primitives::{BuildStorage, StorageMap, ChildrenStorageMap};
use serde_json as json;
use components::RuntimeGenesis;
@@ -63,10 +63,10 @@ impl<G: RuntimeGenesis> GenesisSource<G> {
}
impl<'a, G: RuntimeGenesis> BuildStorage for &'a ChainSpec<G> {
fn build_storage(self) -> Result<StorageMap, String> {
fn build_storage(self) -> Result<(StorageMap, ChildrenStorageMap), String> {
match self.genesis.resolve()? {
Genesis::Runtime(gc) => gc.build_storage(),
Genesis::Raw(map) => Ok(map.into_iter().map(|(k, v)| (k.0, v.0)).collect()),
Genesis::Raw(map) => Ok((map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), Default::default())),
}
}
}
@@ -185,7 +185,7 @@ impl<G: RuntimeGenesis> ChainSpec<G> {
};
let genesis = match (raw, self.genesis.resolve()?) {
(true, Genesis::Runtime(g)) => {
let storage = g.build_storage()?.into_iter()
let storage = g.build_storage()?.0.into_iter()
.map(|(k, v)| (StorageKey(k), StorageData(v)))
.collect();
+2 -2
View File
@@ -30,7 +30,7 @@ use substrate_executor::{NativeExecutor, NativeExecutionDispatch};
use transaction_pool::txpool::{self, Options as TransactionPoolOptions, Pool as TransactionPool};
use runtime_primitives::{traits::Block as BlockT, traits::Header as HeaderT, BuildStorage};
use config::Configuration;
use primitives::{Blake2Hasher};
use primitives::{H256, Blake2Hasher};
// Type aliases.
// These exist mainly to avoid typing `<F as Factory>::Foo` all over the code.
@@ -119,7 +119,7 @@ impl<T: Serialize + DeserializeOwned + BuildStorage> RuntimeGenesis for T {}
/// A collection of types and methods to build a service on top of the substrate service.
pub trait ServiceFactory: 'static + Sized {
/// Block type.
type Block: BlockT;
type Block: BlockT<Hash=H256>;
/// Network protocol extensions.
type NetworkProtocol: network::specialization::Specialization<Self::Block>;
/// Chain runtime.
+3 -3
View File
@@ -26,7 +26,7 @@ use client::{self, error, Client as SubstrateClient, CallExecutor};
use client::runtime_api::{Core, BlockBuilder as BlockBuilderAPI, id::BLOCK_BUILDER};
use codec::{Decode, Encode};
use consensus_common::{self, InherentData, evaluation, offline_tracker::OfflineTracker};
use primitives::{AuthorityId, ed25519, Blake2Hasher};
use primitives::{H256, AuthorityId, ed25519, Blake2Hasher};
use runtime_primitives::traits::{Block as BlockT, Hash as HashT, Header as HeaderT};
use runtime_primitives::generic::BlockId;
use transaction_pool::txpool::{self, Pool as TransactionPool};
@@ -70,7 +70,7 @@ pub trait AuthoringApi:
impl<'a, B, E, Block> BlockBuilder<Block> for client::block_builder::BlockBuilder<'a, B, E, Block, Blake2Hasher> where
B: client::backend::Backend<Block, Blake2Hasher> + Send + Sync + 'static,
E: CallExecutor<Block, Blake2Hasher> + Send + Sync + Clone + 'static,
Block: BlockT
Block: BlockT<Hash=H256>,
{
fn push_extrinsic(&mut self, extrinsic: <Block as BlockT>::Extrinsic) -> Result<(), error::Error> {
client::block_builder::BlockBuilder::push(self, extrinsic).map_err(Into::into)
@@ -80,7 +80,7 @@ impl<'a, B, E, Block> BlockBuilder<Block> for client::block_builder::BlockBuilde
impl<'a, B, E, Block> AuthoringApi for SubstrateClient<B, E, Block> where
B: client::backend::Backend<Block, Blake2Hasher> + Send + Sync + 'static,
E: CallExecutor<Block, Blake2Hasher> + Send + Sync + Clone + 'static,
Block: BlockT,
Block: BlockT<Hash=H256>,
{
type Block = Block;
type Error = client::error::Error;
+1 -1
View File
@@ -5,7 +5,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
num-traits = { version = "0.2", default-features = false }
integer-sqrt = { git = "https://github.com/paritytech/integer-sqrt-rs.git", branch = "master" }
integer-sqrt = { version = "0.1.2" }
serde = { version = "1.0", optional = true }
serde_derive = { version = "1.0", optional = true }
parity-codec = { version = "2.1", default-features = false }
+16 -7
View File
@@ -79,6 +79,10 @@ pub use serde::{Serialize, de::DeserializeOwned};
#[cfg(feature = "std")]
pub type StorageMap = HashMap<Vec<u8>, Vec<u8>>;
/// A set of key value pairs for children storage;
#[cfg(feature = "std")]
pub type ChildrenStorageMap = HashMap<Vec<u8>, StorageMap>;
/// Complex storage builder stuff.
#[cfg(feature = "std")]
pub trait BuildStorage {
@@ -87,13 +91,13 @@ pub trait BuildStorage {
trace!(target: "build_storage", "{} <= {}", substrate_primitives::hexdisplay::HexDisplay::from(&r), ascii_format(data));
r
}
fn build_storage(self) -> Result<StorageMap, String>;
fn build_storage(self) -> Result<(StorageMap, ChildrenStorageMap), String>;
}
#[cfg(feature = "std")]
impl BuildStorage for StorageMap {
fn build_storage(self) -> Result<StorageMap, String> {
Ok(self)
fn build_storage(self) -> Result<(StorageMap, ChildrenStorageMap), String> {
Ok((self, Default::default()))
}
}
@@ -308,14 +312,19 @@ macro_rules! impl_outer_config {
}
#[cfg(any(feature = "std", test))]
impl $crate::BuildStorage for $main {
fn build_storage(self) -> ::std::result::Result<$crate::StorageMap, String> {
let mut s = $crate::StorageMap::new();
fn build_storage(self) -> ::std::result::Result<($crate::StorageMap, $crate::ChildrenStorageMap), String> {
let mut top = $crate::StorageMap::new();
let mut children = $crate::ChildrenStorageMap::new();
$(
if let Some(extra) = self.$snake {
s.extend(extra.build_storage()?);
let (other_top, other_children) = extra.build_storage()?;
top.extend(other_top);
for (other_child_key, other_child_map) in other_children {
children.entry(other_child_key).or_default().extend(other_child_map);
}
}
)*
Ok(s)
Ok((top, children))
}
}
}
+20 -5
View File
@@ -23,7 +23,7 @@ use std::marker::PhantomData;
use hash_db::Hasher;
use trie_backend::TrieBackend;
use trie_backend_essence::TrieBackendStorage;
use substrate_trie::{TrieDBMut, TrieMut, MemoryDB, trie_root, child_trie_root};
use substrate_trie::{TrieDBMut, TrieMut, MemoryDB, trie_root, child_trie_root, default_child_trie_root};
use heapsize::HeapSizeOf;
/// A state backend is used to read state data and can have changes committed
@@ -71,8 +71,9 @@ pub trait Backend<H: Hasher> {
H::Out: Ord;
/// Calculate the child storage root, with given delta over what is already stored in
/// the backend, and produce a "transaction" that can be used to commit.
fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, Self::Transaction)
/// the backend, and produce a "transaction" that can be used to commit. The second argument
/// is true if child storage root equals default storage root.
fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, bool, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord;
@@ -190,6 +191,18 @@ impl<H> From<HashMap<Vec<u8>, Vec<u8>>> for InMemory<H> {
}
}
impl<H> From<Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>> for InMemory<H> {
fn from(inner: Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>) -> Self {
let mut expanded: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>> = HashMap::new();
for (child_key, key, value) in inner {
if let Some(value) = value {
expanded.entry(child_key).or_default().insert(key, value);
}
}
expanded.into()
}
}
impl super::Error for Void {}
impl<H: Hasher> Backend<H> for InMemory<H> where H::Out: HeapSizeOf {
@@ -236,7 +249,7 @@ impl<H: Hasher> Backend<H> for InMemory<H> where H::Out: HeapSizeOf {
(root, full_transaction)
}
fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, Self::Transaction)
fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, bool, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord
@@ -256,7 +269,9 @@ impl<H: Hasher> Backend<H> for InMemory<H> where H::Out: HeapSizeOf {
let full_transaction = transaction.into_iter().map(|(k, v)| (Some(storage_key.clone()), k, v)).collect();
(root, full_transaction)
let is_default = root == default_child_trie_root::<H>(&storage_key);
(root, is_default, full_transaction)
}
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
+2 -2
View File
@@ -128,7 +128,7 @@ where
fn child_storage_root_transaction(&mut self, storage_key: &[u8]) -> (Vec<u8>, B::Transaction) {
self.mark_dirty();
let (root, transaction) = {
let (root, is_default, transaction) = {
let delta = self.overlay.committed.children.get(storage_key)
.into_iter()
.flat_map(|map| map.1.iter().map(|(k, v)| (k.clone(), v.clone())))
@@ -139,7 +139,7 @@ where
self.backend.child_storage_root(storage_key, delta)
};
let root_val = if root == default_child_trie_root::<H>(storage_key) {
let root_val = if is_default {
None
} else {
Some(root.clone())
@@ -152,7 +152,7 @@ impl<S, H> Backend<H> for ProvingBackend<S, H>
self.backend.storage_root(delta)
}
fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, Self::Transaction)
fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, bool, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord
@@ -121,17 +121,19 @@ impl<S: TrieBackendStorage<H>, H: Hasher> Backend<H> for TrieBackend<S, H> where
(root, write_overlay)
}
fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, Self::Transaction)
fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, bool, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord
{
let default_root = default_child_trie_root::<H>(storage_key);
let mut write_overlay = MemoryDB::default();
let mut root = match self.storage(storage_key) {
Ok(value) => value.unwrap_or(default_child_trie_root::<H>(storage_key)),
Err(e) => {
warn!(target: "trie", "Failed to read child storage root: {}", e);
default_child_trie_root::<H>(storage_key)
default_root.clone()
},
};
@@ -147,7 +149,9 @@ impl<S: TrieBackendStorage<H>, H: Hasher> Backend<H> for TrieBackend<S, H> where
}
}
(root, write_overlay)
let is_default = root == default_root;
(root, is_default, write_overlay)
}
fn try_into_trie_backend(self) -> Option<TrieBackend<Self::TrieBackendStorage, H>> {
+17 -15
View File
@@ -55,14 +55,27 @@ const CHANNEL_SIZE: usize = 262144;
/// Initialise telemetry.
pub fn init_telemetry(config: TelemetryConfig) -> slog_scope::GlobalLoggerGuard {
let writer = TelemetryWriter::new();
let out_sync = writer.out.clone();
let log = slog::Logger::root(
slog_async::Async::new(
slog_json::Json::default(TelemetryWriter::new(config)).fuse()
slog_json::Json::default(writer).fuse()
).chan_size(CHANNEL_SIZE)
.overflow_strategy(slog_async::OverflowStrategy::DropAndReport)
.build().fuse(), o!()
);
slog_scope::set_global_logger(log)
let logger_guard = slog_scope::set_global_logger(log);
thread::spawn(move || {
loop {
trace!(target: "telemetry", "Connecting to Telemetry...");
let _ = ws::connect(config.url.as_str(), |out| Connection::new(out, &*out_sync, &config));
thread::sleep(time::Duration::from_millis(5000));
}
});
return logger_guard;
}
/// Exactly equivalent to `slog_scope::info`, provided as a convenience.
@@ -117,19 +130,8 @@ struct TelemetryWriter {
}
impl TelemetryWriter {
fn new(config: TelemetryConfig) -> Self {
let out_sync = Arc::new(Mutex::new(None));
let out = out_sync.clone();
thread::spawn(move || {
loop {
trace!(target: "telemetry", "Connecting to Telemetry...");
let _ = ws::connect(config.url.as_str(), |out| Connection::new(out, &*out_sync, &config));
thread::sleep(time::Duration::from_millis(5000));
}
});
fn new() -> Self {
let out = Arc::new(Mutex::new(None));
TelemetryWriter {
buffer: Vec::new(),
+3 -1
View File
@@ -43,6 +43,7 @@ pub use executor::NativeExecutor;
use std::sync::Arc;
use primitives::Blake2Hasher;
use runtime_primitives::StorageMap;
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Hash as HashT};
use runtime::genesismap::{GenesisConfig, additional_storage_with_genesis};
use keyring::Keyring;
@@ -98,7 +99,8 @@ fn genesis_config(support_changes_trie: bool) -> GenesisConfig {
fn genesis_storage(support_changes_trie: bool) -> StorageMap {
let mut storage = genesis_config(support_changes_trie).genesis_map();
let block: runtime::Block = client::genesis::construct_genesis_block(&storage);
let state_root = <<<runtime::Block as BlockT>::Header as HeaderT>::Hashing as HashT>::trie_root(storage.clone().into_iter());
let block: runtime::Block = client::genesis::construct_genesis_block(state_root);
storage.extend(additional_storage_with_genesis(&block));
storage
}
+4 -4
View File
@@ -167,8 +167,8 @@ dependencies = [
[[package]]
name = "integer-sqrt"
version = "0.1.0"
source = "git+https://github.com/paritytech/integer-sqrt-rs.git#886e9cb983c46498003878afe965d55caa762025"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "lazy_static"
@@ -493,7 +493,7 @@ dependencies = [
name = "sr-primitives"
version = "0.1.0"
dependencies = [
"integer-sqrt 0.1.0 (git+https://github.com/paritytech/integer-sqrt-rs.git)",
"integer-sqrt 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -762,7 +762,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum heapsize 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1679e6ea370dee694f91f1dc469bf94cf8f52051d147aec3e1f9497c6fc22461"
"checksum hex-literal 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4da5f0e01bd8a71a224a4eedecaacfcabda388dbb7a80faf04d3514287572d95"
"checksum hex-literal-impl 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1d340b6514f232f6db1bd16db65302a5278a04fef9ce867cb932e7e5fa21130a"
"checksum integer-sqrt 0.1.0 (git+https://github.com/paritytech/integer-sqrt-rs.git)" = "<none>"
"checksum integer-sqrt 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ea155abb3ba6f382a75f1418988c05fe82959ed9ce727de427f9cfd425b0c903"
"checksum lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "76f033c7ad61445c5b347c7382dd1237847eb1bce590fe50365dcb33d546be73"
"checksum lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca488b89a5657b0a2ecd45b95609b3e848cf1755da332a0da46e2b2b1cb371a7"
"checksum libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)" = "76e3a3ef172f1a0b9a9ff0dd1491ae5e6c948b94479a3021819ba7d860c8645d"
+1 -2
View File
@@ -50,7 +50,7 @@ impl<B, E, Block: traits::Block> ChainApi<B, E, Block> {
}
impl<B, E, Block> txpool::ChainApi for ChainApi<B, E, Block> where
Block: traits::Block,
Block: traits::Block<Hash=H256>,
B: client::backend::Backend<Block, Blake2Hasher> + Send + Sync + 'static,
E: client::CallExecutor<Block, Blake2Hasher> + Send + Sync + Clone + 'static,
{
@@ -75,4 +75,3 @@ impl<B, E, Block> txpool::ChainApi for ChainApi<B, E, Block> where
Blake2Hasher::hash(&ex.encode())
}
}
+1 -1
View File
@@ -255,7 +255,7 @@ mod tests {
treasury: Some(Default::default()),
contract: Some(Default::default()),
upgrade_key: Some(Default::default()),
}.build_storage().unwrap())
}.build_storage().unwrap().0)
}
fn construct_block(
+5 -5
View File
@@ -167,8 +167,8 @@ dependencies = [
[[package]]
name = "integer-sqrt"
version = "0.1.0"
source = "git+https://github.com/paritytech/integer-sqrt-rs.git#886e9cb983c46498003878afe965d55caa762025"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "lazy_static"
@@ -263,7 +263,7 @@ dependencies = [
name = "node-runtime"
version = "0.1.0"
dependencies = [
"integer-sqrt 0.1.0 (git+https://github.com/paritytech/integer-sqrt-rs.git)",
"integer-sqrt 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"node-primitives 0.1.0",
"parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-codec-derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -554,7 +554,7 @@ dependencies = [
name = "sr-primitives"
version = "0.1.0"
dependencies = [
"integer-sqrt 0.1.0 (git+https://github.com/paritytech/integer-sqrt-rs.git)",
"integer-sqrt 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1051,7 +1051,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
"checksum heapsize 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1679e6ea370dee694f91f1dc469bf94cf8f52051d147aec3e1f9497c6fc22461"
"checksum hex-literal 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4da5f0e01bd8a71a224a4eedecaacfcabda388dbb7a80faf04d3514287572d95"
"checksum hex-literal-impl 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1d340b6514f232f6db1bd16db65302a5278a04fef9ce867cb932e7e5fa21130a"
"checksum integer-sqrt 0.1.0 (git+https://github.com/paritytech/integer-sqrt-rs.git)" = "<none>"
"checksum integer-sqrt 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ea155abb3ba6f382a75f1418988c05fe82959ed9ce727de427f9cfd425b0c903"
"checksum lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "76f033c7ad61445c5b347c7382dd1237847eb1bce590fe50365dcb33d546be73"
"checksum lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca488b89a5657b0a2ecd45b95609b3e848cf1755da332a0da46e2b2b1cb371a7"
"checksum libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)" = "76e3a3ef172f1a0b9a9ff0dd1491ae5e6c948b94479a3021819ba7d860c8645d"
+1 -1
View File
@@ -7,7 +7,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
crate-type = ["cdylib"]
[dependencies]
integer-sqrt = { git = "https://github.com/paritytech/integer-sqrt-rs.git", branch = "master" }
integer-sqrt = { version = "0.1.2" }
safe-mix = { version = "1.0", default-features = false}
parity-codec-derive = { version = "2.1" }
parity-codec = { version = "2.1", default-features = false }
+1 -1
View File
@@ -186,7 +186,7 @@ mod tests {
// This function basically just builds a genesis storage key/value store according to
// our desired mockup.
fn new_test_ext() -> runtime_io::TestExternalities<Blake2Hasher> {
system::GenesisConfig::<Test>::default().build_storage().unwrap().into()
system::GenesisConfig::<Test>::default().build_storage().unwrap().0.into()
}
#[test]
+1 -1
View File
@@ -260,7 +260,7 @@ decl_storage! {
}
add_extra_genesis {
config(balances): Vec<(T::AccountId, T::Balance)>;
build(|storage: &mut primitives::StorageMap, config: &GenesisConfig<T>| {
build(|storage: &mut primitives::StorageMap, _: &mut primitives::ChildrenStorageMap, config: &GenesisConfig<T>| {
let ids: Vec<_> = config.balances.iter().map(|x| x.0.clone()).collect();
for i in 0..(ids.len() + ENUM_SET_SIZE - 1) / ENUM_SET_SIZE {
storage.insert(GenesisConfig::<T>::hash(&<EnumSet<T>>::key_for(T::AccountIndex::sa(i))).to_vec(),
+2 -2
View File
@@ -85,7 +85,7 @@ impl ExtBuilder {
self
}
pub fn build(self) -> runtime_io::TestExternalities<Blake2Hasher> {
let mut t = system::GenesisConfig::<Runtime>::default().build_storage().unwrap();
let mut t = system::GenesisConfig::<Runtime>::default().build_storage().unwrap().0;
let balance_factor = if self.existential_deposit > 0 {
256
} else {
@@ -103,7 +103,7 @@ impl ExtBuilder {
transfer_fee: self.transfer_fee,
creation_fee: self.creation_fee,
reclaim_rebate: 0,
}.build_storage().unwrap());
}.build_storage().unwrap().0);
t.into()
}
}
+1 -1
View File
@@ -130,7 +130,7 @@ decl_storage! {
#[serde(with = "substrate_primitives::bytes")]
config(code): Vec<u8>;
build(|storage: &mut primitives::StorageMap, config: &GenesisConfig<T>| {
build(|storage: &mut primitives::StorageMap, _: &mut primitives::ChildrenStorageMap, config: &GenesisConfig<T>| {
use codec::{Encode, KeyedVec};
let auth_count = config.authorities.len() as u32;
+2 -2
View File
@@ -50,11 +50,11 @@ impl system::Trait for Test {
}
pub fn new_test_ext(authorities: Vec<u64>) -> runtime_io::TestExternalities<Blake2Hasher> {
let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap();
let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap().0;
t.extend(GenesisConfig::<Test>{
code: vec![],
authorities,
}.build_storage().unwrap());
}.build_storage().unwrap().0);
t.into()
}
+13 -9
View File
@@ -99,9 +99,9 @@ These functions only modify the local `Map`.
A lookup in the local cache consists of at least one `Map` lookup, for locating the specific account. For `get_storage` there is a second lookup: because account's storage is implemented as a nested map, another lookup is required for fetching a storage value by a key.
While these functions only modify the local `Map`, if changes made by them are committed to the bottommost `AccountDb`, each changed entry in the `Map` will require a DB write. It should be ensured that pricing accounts for this fact.
While these functions only modify the local `Map`, if changes made by them are committed to the bottommost `AccountDb`, each changed entry in the `Map` will require a DB write. Moreover, if the balance of the account is changed to be below `existential_deposit` then that account along with all its storage will be removed, which requires time proportional to the number of storage entries that account has. It should be ensured that pricing accounts for these facts.
**complexity**: Each lookup has a logarithmical computing time to the number of inserted entries. No additional memory is required.
**complexity**: Each lookup has logarithmic computing time with respect to the number of already inserted entries. No additional memory is required.
## commit
@@ -115,13 +115,15 @@ Note that in case of storage modification we need to construct a key in the unde
- then perform `blake2_256` hashing of the storage key.
- concatenation of these hashes will constitute the key in the underlying storage.
**complexity**:
There is also a special case to think of: if the balance of some account goes below `existential_deposit`, then all storage entries of that account will be erased, which requires time proportional to the number of storage entries that account has.
**complexity**: `N` inserts into a `Map` or, if committed, into the storage. Every deleted account will induce removal of all its storage, which takes time proportional to the number of storage entries that account has.
## revert
Consists of dropping (in the Rust sense) of the `AccountDb`.
**complexity**: TODO: What about the recursive dropping of all values?
**complexity**: Computing complexity is proportional to the number of changed entries in an overlay. No additional memory is required.
# Executive
@@ -139,9 +141,11 @@ This function performs the following steps:
In the course of the execution this function can perform up to 4 DB reads: 2x `get_balance`, fee and `existential_deposit`. The last two can be pre-loaded pushing the cost of loading to a higher level and making it a one time. It can also induce up to 2 DB writes via `set_balance` if flushed to the storage.
Moreover, if the source balance goes below `existential_deposit` then the account will be deleted along with all its storage which requires time proportional to the number of storage entries of that account.
Assuming marshaled size of a balance value is of the constant size we can neglect its effect on the performance.
**complexity**: up to 4 DB reads and up to 2 DB writes. For the current `AccountDb` implementation computing complexity also depends on the depth of the `AccountDb` cascade. Memorywise it can be assumed to be constant.
**complexity**: up to 4 DB reads and up to 2 DB writes (if flushed to the storage) in the standard case. If removal of the source account takes place then it will additionally perform a DB write per one storage entry that the account has. For the current `AccountDb` implementation computing complexity also depends on the depth of the `AccountDb` cascade. Memorywise it can be assumed to be constant.
## Call
@@ -159,11 +163,11 @@ The execution of this function will involve 2 DB reads for querying `MaxDepth` a
Loading code most probably will trigger a DB read, since the code is immutable and therefore will not get into the cache (unless a suicide removes it).
Also, `transfer` can make up to 4 DB reads and up to 2 DB writes (if flushed to the storage).
Also, `transfer` can make up to 4 DB reads and up to 2 DB writes (if flushed to the storage) in the standard case. If removal of the source account takes place then it will additionally perform a DB write per one storage entry that the account has.
Finally, all changes are `commit`-ted into the underlying overlay. The complexity of this depends on the number of changes performed by the code. Thus, the pricing of storage modification should account for that.
**complexity**: Up to 7 DB reads. DB read of the code is of dynamic size. There can also be up to 2 DB writes (if flushed to the storage).
**complexity**: Up to 7 DB reads. DB read of the code is of dynamic size. There can also be up to 2 DB writes (if flushed to the storage). Additionally, if the source account removal takes place a DB write will be performed per one storage entry that the account has.
## Create
@@ -184,13 +188,13 @@ This function takes the code of the constructor and input data. Creation of a co
The execution of this function involves 2 DB reads for querying the `create_base_fee` and `MaxDepth` constants. These values can be pre-loaded, pushing the cost of loading to a higher level and making it a one-time cost.
Also, `transfer` can make up to 4 DB reads and up to 2 DB writes (if flushed to the storage).
Also, `transfer` can make up to 4 DB reads and up to 2 DB writes (if flushed to the storage) in the standard case. If removal of the source account takes place then it will additionally perform one DB write per storage entry that the account has.
Storing the code in the overlay may induce another DB write (if flushed to the storage) with the size proportional to the size of the constructor code.
Finally, all changes are `commit`-ted into the underlying overlay. The complexity of this depends on the number of changes performed by the constructor code. Thus, the pricing of storage modification should account for that.
**complexity**: Up to 6 DB reads and induces up to 3 DB writes (if flushed to the storage), one of which is dependent on the size of the code.
**complexity**: Up to 6 DB reads and induces up to 3 DB writes (if flushed to the storage), one of which is dependent on the size of the code. Additionally, if removal of the source account takes place, one DB write will be performed per storage entry that the account has.
# Externalities
+3 -3
View File
@@ -121,7 +121,7 @@ impl ExtBuilder {
fn build(self) -> runtime_io::TestExternalities<Blake2Hasher> {
let mut t = system::GenesisConfig::<Test>::default()
.build_storage()
.unwrap();
.unwrap().0;
t.extend(
balances::GenesisConfig::<Test> {
balances: vec![],
@@ -132,7 +132,7 @@ impl ExtBuilder {
creation_fee: self.creation_fee,
reclaim_rebate: 0,
}.build_storage()
.unwrap(),
.unwrap().0,
);
t.extend(
GenesisConfig::<Test> {
@@ -143,7 +143,7 @@ impl ExtBuilder {
max_depth: 100,
block_gas_limit: self.block_gas_limit,
}.build_storage()
.unwrap(),
.unwrap().0,
);
runtime_io::TestExternalities::new(t)
}
+5 -5
View File
@@ -117,7 +117,7 @@ mod tests {
}
pub fn new_test_ext(with_council: bool) -> runtime_io::TestExternalities<Blake2Hasher> {
let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap();
let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap().0;
t.extend(balances::GenesisConfig::<Test>{
balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)],
transaction_base_fee: 0,
@@ -126,12 +126,12 @@ mod tests {
transfer_fee: 0,
creation_fee: 0,
reclaim_rebate: 0,
}.build_storage().unwrap());
}.build_storage().unwrap().0);
t.extend(democracy::GenesisConfig::<Test>{
launch_period: 1,
voting_period: 3,
minimum_deposit: 1,
}.build_storage().unwrap());
}.build_storage().unwrap().0);
t.extend(seats::GenesisConfig::<Test> {
candidacy_bond: 9,
voter_bond: 3,
@@ -147,11 +147,11 @@ mod tests {
presentation_duration: 2,
desired_seats: 2,
term_duration: 5,
}.build_storage().unwrap());
}.build_storage().unwrap().0);
t.extend(voting::GenesisConfig::<Test> {
cooloff_period: 2,
voting_period: 1,
}.build_storage().unwrap());
}.build_storage().unwrap().0);
runtime_io::TestExternalities::new(t)
}
+1 -1
View File
@@ -174,7 +174,7 @@ decl_storage! {
}
add_extra_genesis {
config(_marker): ::std::marker::PhantomData<T>;
build(|_, _| {});
build(|_, _, _| {});
}
}
+3 -3
View File
@@ -353,7 +353,7 @@ mod tests {
}
fn new_test_ext() -> runtime_io::TestExternalities<Blake2Hasher> {
let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap();
let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap().0;
t.extend(balances::GenesisConfig::<Test>{
balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)],
transaction_base_fee: 0,
@@ -362,12 +362,12 @@ mod tests {
transfer_fee: 0,
creation_fee: 0,
reclaim_rebate: 0,
}.build_storage().unwrap());
}.build_storage().unwrap().0);
t.extend(GenesisConfig::<Test>{
launch_period: 1,
voting_period: 1,
minimum_deposit: 1,
}.build_storage().unwrap());
}.build_storage().unwrap().0);
runtime_io::TestExternalities::new(t)
}
+3 -3
View File
@@ -314,13 +314,13 @@ mod tests {
// This function basically just builds a genesis storage key/value store according to
// our desired mockup.
fn new_test_ext() -> sr_io::TestExternalities<Blake2Hasher> {
let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap();
let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap().0;
// We use default for brevity, but you can configure as desired if needed.
t.extend(balances::GenesisConfig::<Test>::default().build_storage().unwrap());
t.extend(balances::GenesisConfig::<Test>::default().build_storage().unwrap().0);
t.extend(GenesisConfig::<Test>{
dummy: 42,
foo: 24,
}.build_storage().unwrap());
}.build_storage().unwrap().0);
t.into()
}
+4 -4
View File
@@ -322,7 +322,7 @@ mod tests {
#[test]
fn balance_transfer_dispatch_works() {
let mut t = system::GenesisConfig::<Runtime>::default().build_storage().unwrap();
let mut t = system::GenesisConfig::<Runtime>::default().build_storage().unwrap().0;
t.extend(balances::GenesisConfig::<Runtime> {
balances: vec![(1, 111)],
transaction_base_fee: 10,
@@ -331,7 +331,7 @@ mod tests {
transfer_fee: 0,
creation_fee: 0,
reclaim_rebate: 0,
}.build_storage().unwrap());
}.build_storage().unwrap().0);
let xt = primitives::testing::TestXt(Some(1), 0, Call::transfer(2.into(), 69.into()));
let mut t = runtime_io::TestExternalities::<Blake2Hasher>::new(t);
with_externalities(&mut t, || {
@@ -344,8 +344,8 @@ mod tests {
}
fn new_test_ext() -> runtime_io::TestExternalities<Blake2Hasher> {
let mut t = system::GenesisConfig::<Runtime>::default().build_storage().unwrap();
t.extend(balances::GenesisConfig::<Runtime>::default().build_storage().unwrap());
let mut t = system::GenesisConfig::<Runtime>::default().build_storage().unwrap().0;
t.extend(balances::GenesisConfig::<Runtime>::default().build_storage().unwrap().0);
t.into()
}
+4 -4
View File
@@ -270,18 +270,18 @@ mod tests {
type Session = Module<Test>;
fn new_test_ext() -> runtime_io::TestExternalities<Blake2Hasher> {
let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap();
let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap().0;
t.extend(consensus::GenesisConfig::<Test>{
code: vec![],
authorities: vec![1, 2, 3],
}.build_storage().unwrap());
}.build_storage().unwrap().0);
t.extend(timestamp::GenesisConfig::<Test>{
period: 5,
}.build_storage().unwrap());
}.build_storage().unwrap().0);
t.extend(GenesisConfig::<Test>{
session_length: 2,
validators: vec![1, 2, 3],
}.build_storage().unwrap());
}.build_storage().unwrap().0);
runtime_io::TestExternalities::new(t)
}
+6 -6
View File
@@ -79,7 +79,7 @@ pub fn new_test_ext(
monied: bool,
reward: u64
) -> runtime_io::TestExternalities<Blake2Hasher> {
let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap();
let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap().0;
let balance_factor = if ext_deposit > 0 {
256
} else {
@@ -88,11 +88,11 @@ pub fn new_test_ext(
t.extend(consensus::GenesisConfig::<Test>{
code: vec![],
authorities: vec![],
}.build_storage().unwrap());
}.build_storage().unwrap().0);
t.extend(session::GenesisConfig::<Test>{
session_length,
validators: vec![10, 20],
}.build_storage().unwrap());
}.build_storage().unwrap().0);
t.extend(balances::GenesisConfig::<Test>{
balances: if monied {
if reward > 0 {
@@ -109,7 +109,7 @@ pub fn new_test_ext(
transfer_fee: 0,
creation_fee: 0,
reclaim_rebate: 0,
}.build_storage().unwrap());
}.build_storage().unwrap().0);
t.extend(GenesisConfig::<Test>{
sessions_per_era,
current_era,
@@ -122,10 +122,10 @@ pub fn new_test_ext(
current_session_reward: reward,
current_offline_slash: 20,
offline_slash_grace: 0,
}.build_storage().unwrap());
}.build_storage().unwrap().0);
t.extend(timestamp::GenesisConfig::<Test>{
period: 5
}.build_storage().unwrap());
}.build_storage().unwrap().0);
runtime_io::TestExternalities::new(t)
}
+1 -1
View File
@@ -189,7 +189,7 @@ mod tests {
}
add_extra_genesis {
config(_marker) : ::std::marker::PhantomData<T>;
build(|_, _| {});
build(|_, _, _| {});
}
}
}
@@ -644,8 +644,9 @@ macro_rules! __generate_genesis_config {
#[cfg(feature = "std")]
impl<$traitinstance: $traittype> $crate::runtime_primitives::BuildStorage for GenesisConfig<$traitinstance>
{
fn build_storage(self) -> ::std::result::Result<$crate::runtime_primitives::StorageMap, String> {
fn build_storage(self) -> ::std::result::Result<($crate::runtime_primitives::StorageMap, $crate::runtime_primitives::ChildrenStorageMap), String> {
let mut r: $crate::runtime_primitives::StorageMap = Default::default();
let mut c: $crate::runtime_primitives::ChildrenStorageMap = Default::default();
// normal getters
$({
@@ -664,9 +665,9 @@ macro_rules! __generate_genesis_config {
})*
// extra call
$call(&mut r, &self);
$call(&mut r, &mut c, &self);
Ok(r)
Ok((r, c))
}
}
};
@@ -718,7 +719,7 @@ macro_rules! decl_storage {
__impl_store_fns!($traitinstance $($t)*);
__impl_store_metadata!($cratename; $($t)*);
}
__decl_genesis_config_items!([$traittype $traitinstance] [] [] [] [] [|_, _|{}] $($t)*);
__decl_genesis_config_items!([$traittype $traitinstance] [] [] [] [] [|_, _, _|{}] $($t)*);
};
}
@@ -1961,7 +1962,7 @@ mod tests {
}
add_extra_genesis {
config(_marker) : ::std::marker::PhantomData<T>;
build(|_, _| {});
build(|_, _, _| {});
}
}
@@ -2168,7 +2169,7 @@ mod test2 {
add_extra_genesis {
config(_marker) : ::std::marker::PhantomData<T>;
config(extra_field) : u32 = 32;
build(|_, _| {});
build(|_, _, _| {});
}
}
+2 -2
View File
@@ -206,7 +206,7 @@ decl_storage! {
config(changes_trie_config): Option<ChangesTrieConfiguration>;
config(_phantom): ::std::marker::PhantomData<T>;
build(|storage: &mut primitives::StorageMap, config: &GenesisConfig<T>| {
build(|storage: &mut primitives::StorageMap, _: &mut primitives::ChildrenStorageMap, config: &GenesisConfig<T>| {
use codec::Encode;
storage.insert(well_known_keys::EXTRINSIC_INDEX.to_vec(), 0u32.encode());
@@ -467,7 +467,7 @@ mod tests {
type System = Module<Test>;
fn new_test_ext() -> runtime_io::TestExternalities<Blake2Hasher> {
GenesisConfig::<Test>::default().build_storage().unwrap().into()
GenesisConfig::<Test>::default().build_storage().unwrap().0.into()
}
#[test]
+6 -6
View File
@@ -215,8 +215,8 @@ mod tests {
#[test]
fn timestamp_works() {
let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap();
t.extend(GenesisConfig::<Test> { period: 0 }.build_storage().unwrap());
let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap().0;
t.extend(GenesisConfig::<Test> { period: 0 }.build_storage().unwrap().0);
with_externalities(&mut TestExternalities::new(t), || {
Timestamp::set_timestamp(42);
@@ -228,8 +228,8 @@ mod tests {
#[test]
#[should_panic(expected = "Timestamp must be updated only once in the block")]
fn double_timestamp_should_fail() {
let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap();
t.extend(GenesisConfig::<Test> { period: 5 }.build_storage().unwrap());
let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap().0;
t.extend(GenesisConfig::<Test> { period: 5 }.build_storage().unwrap().0);
with_externalities(&mut TestExternalities::new(t), || {
Timestamp::set_timestamp(42);
@@ -241,8 +241,8 @@ mod tests {
#[test]
#[should_panic(expected = "Timestamp must increment by at least <BlockPeriod> between sequential blocks")]
fn block_period_is_enforced() {
let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap();
t.extend(GenesisConfig::<Test> { period: 5 }.build_storage().unwrap());
let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap().0;
t.extend(GenesisConfig::<Test> { period: 5 }.build_storage().unwrap().0);
with_externalities(&mut TestExternalities::new(t), || {
Timestamp::set_timestamp(42);
+3 -3
View File
@@ -325,7 +325,7 @@ mod tests {
type Treasury = Module<Test>;
fn new_test_ext() -> runtime_io::TestExternalities<Blake2Hasher> {
let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap();
let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap().0;
t.extend(balances::GenesisConfig::<Test>{
balances: vec![(0, 100), (1, 99), (2, 1)],
transaction_base_fee: 0,
@@ -334,13 +334,13 @@ mod tests {
creation_fee: 0,
existential_deposit: 0,
reclaim_rebate: 0,
}.build_storage().unwrap());
}.build_storage().unwrap().0);
t.extend(GenesisConfig::<Test>{
proposal_bond: Permill::from_percent(5),
proposal_bond_minimum: 1,
spend_period: 2,
burn: Permill::from_percent(50),
}.build_storage().unwrap());
}.build_storage().unwrap().0);
t.into()
}