diff --git a/substrate/.dockerignore b/substrate/.dockerignore new file mode 100644 index 0000000000..2b0e81eaf0 --- /dev/null +++ b/substrate/.dockerignore @@ -0,0 +1,2 @@ +doc +target diff --git a/substrate/.gitlab-ci.yml b/substrate/.gitlab-ci.yml index 3bca6f6be8..b803ce3d57 100644 --- a/substrate/.gitlab-ci.yml +++ b/substrate/.gitlab-ci.yml @@ -32,7 +32,7 @@ variables: when: on_success expire_in: 1 mos paths: - - target/release/polkadot + - target/release/substrate .determine_version: &determine_version | export VERSION=$(grep -m 1 "version =" Cargo.toml | awk '{print $3}' | tr -d '"' | tr -d "\n") @@ -49,6 +49,7 @@ test:rust:stable: &test script: - ./scripts/init.sh - export PATH="${CI_PROJECT_DIR}/cargo/bin/:$PATH" + - export RUST_BACKTRACE=1 - ./scripts/build.sh - time cargo test --all --release --locked tags: diff --git a/substrate/Cargo.lock b/substrate/Cargo.lock index 6377a2a15c..e3e4b66e00 100644 --- a/substrate/Cargo.lock +++ b/substrate/Cargo.lock @@ -1594,9 +1594,19 @@ name = "node-cli" version = "0.1.0" dependencies = [ "exit-future 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "hex-literal 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "node-service 0.1.0", + "node-executor 0.1.0", + "node-network 0.1.0", + "node-primitives 0.1.0", + "node-runtime 0.1.0", + "sr-primitives 0.1.0", "substrate-cli 0.3.0", + "substrate-network 0.1.0", + "substrate-primitives 0.1.0", + "substrate-service 0.3.0", + "substrate-service-test 0.3.0", + "substrate-transaction-pool 0.1.0", "tokio 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1713,37 +1723,6 @@ dependencies = [ "substrate-primitives 0.1.0", ] -[[package]] -name = "node-service" -version = "0.1.0" -dependencies = [ - "error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", - "hex-literal 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - 
"lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", - "node-consensus 0.1.0", - "node-executor 0.1.0", - "node-network 0.1.0", - "node-primitives 0.1.0", - "node-runtime 0.1.0", - "parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "rhododendron 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", - "slog 2.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "sr-io 0.1.0", - "sr-primitives 0.1.0", - "substrate-client 0.1.0", - "substrate-keyring 0.1.0", - "substrate-network 0.1.0", - "substrate-primitives 0.1.0", - "substrate-service 0.3.0", - "substrate-service-test 0.3.0", - "substrate-telemetry 0.3.0", - "substrate-test-client 0.1.0", - "substrate-transaction-pool 0.1.0", - "tokio 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "nodrop" version = "0.1.12" @@ -2521,7 +2500,6 @@ name = "sr-api" version = "0.1.0" dependencies = [ "parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-codec-derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 0.1.0", "sr-std 0.1.0", "sr-version 0.1.0", @@ -2555,7 +2533,6 @@ dependencies = [ "serde_json 1.0.32 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 0.1.0", "sr-std 0.1.0", - "sr-version 0.1.0", "substrate-primitives 0.1.0", ] @@ -2587,9 +2564,27 @@ dependencies = [ "parity-codec-derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)", + "sr-primitives 0.1.0", "sr-std 0.1.0", ] +[[package]] +name = "srml-assets" +version = "0.1.0" +dependencies = [ + "hex-literal 0.1.1 
(registry+https://github.com/rust-lang/crates.io-index)", + "parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-codec-derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)", + "sr-io 0.1.0", + "sr-primitives 0.1.0", + "sr-std 0.1.0", + "srml-support 0.1.0", + "srml-system 0.1.0", + "substrate-primitives 0.1.0", +] + [[package]] name = "srml-balances" version = "0.1.0" @@ -2806,6 +2801,7 @@ version = "0.1.0" dependencies = [ "hex-literal 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-codec-derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 0.1.0", diff --git a/substrate/Cargo.toml b/substrate/Cargo.toml index bc0fc5feef..f47fa42507 100644 --- a/substrate/Cargo.toml +++ b/substrate/Cargo.toml @@ -39,6 +39,7 @@ members = [ "core/transaction-pool", "core/transaction-pool/graph", "srml/support", + "srml/assets", "srml/balances", "srml/consensus", "srml/contract", @@ -68,7 +69,6 @@ members = [ "node/network", "node/primitives", "node/runtime", - "node/service", "subkey", ] exclude = [ diff --git a/substrate/Dockerfile b/substrate/Dockerfile new file mode 100644 index 0000000000..87c33c859f --- /dev/null +++ b/substrate/Dockerfile @@ -0,0 +1,38 @@ +FROM phusion/baseimage:0.10.1 as builder +LABEL maintainer "chevdor@gmail.com" +LABEL description="This is the build stage for Substrate. Here we create the binary." + +ARG PROFILE=release +WORKDIR /substrate + +COPY . 
/substrate + +RUN apt-get update && \ + apt-get upgrade -y && \ + apt-get install -y cmake pkg-config libssl-dev git + +RUN curl https://sh.rustup.rs -sSf | sh -s -- -y && \ + export PATH=$PATH:$HOME/.cargo/bin && \ + cargo build --$PROFILE + +# ===== SECOND STAGE ====== + +FROM phusion/baseimage:0.10.0 +LABEL maintainer "chevdor@gmail.com" +LABEL description="This is the 2nd stage: a very small image where we copy the Substrate binary." +ARG PROFILE=release +COPY --from=builder /substrate/target/$PROFILE/substrate /usr/local/bin + +RUN mv /usr/share/ca* /tmp && \ + rm -rf /usr/share/* && \ + mv /tmp/ca-certificates /usr/share/ && \ + rm -rf /usr/lib/python* && \ + mkdir -p /root/.local/share/Substrate && \ + ln -s /root/.local/share/Substrate /data + +RUN rm -rf /usr/bin /usr/sbin + +EXPOSE 30333 9933 9944 +VOLUME ["/data"] + +CMD ["/usr/local/bin/substrate"] diff --git a/substrate/README.adoc b/substrate/README.adoc index 01d4788705..f0aa15c4f9 100644 --- a/substrate/README.adoc +++ b/substrate/README.adoc @@ -114,6 +114,54 @@ Inherent extrinsic knowledge is again somewhat generic, and the actual construct - DAO runtime module - Audit +== Trying out Substrate Node + +Substate Node is Substrate's pre-baked blockchain client. You can run a development node locally or configure a new chain and launch your own global testnet. + +=== On Mac + +To get going as fast as possible, there is a simple script that installs all required dependencies and installs Substrate into your path. Just open a terminal and run: + +[source, shell] +---- +curl https://raw.githubusercontent.com/paritytech/substrate/master/scripts/getgoing.sh -sSf | sh +---- + +You can start a local Substrate development chain with running `substrate --dev`. + +To create your own global testnet, you'll need to make a new Substrate Node chain specification file ("chainspec"). + +First let's get a template chainspec that you can edit. 
We'll use the "staging" chain, a sort of default chain that the node comes pre-configured with: + +[source, shell] +---- +substrate build-spec --chain=staging > ~/chainspec.json +---- + +Now, edit `~/chainspec.json` in your editor. There are a lot of individual fields for each module, and one very large one which contains the WebAssembly code blob for this chain. The easiest field to edit is the block `period`. Change it to 10 (seconds): + +[source, json] +---- + "timestamp": { + "period": 10 + }, +---- + +[source, shell] +---- +substrate build-spec --chain ~/chainspec.json --raw > ~/mychain.json +---- + +[source, shell] +---- +substrate --chain ~/mychain.json +---- + +[source, shell] +---- +substrate --chain ~/mychain.json --validator --key ... +---- + == Building diff --git a/substrate/core/client/db/src/cache/list_cache.rs b/substrate/core/client/db/src/cache/list_cache.rs index 59e49fbff3..6d95349ee3 100644 --- a/substrate/core/client/db/src/cache/list_cache.rs +++ b/substrate/core/client/db/src/cache/list_cache.rs @@ -91,7 +91,7 @@ pub struct Fork { head: Entry, } -/// Outcome of Fork::try_append_or_fork. +/// Outcome of Fork::try_append_or_fork. #[derive(Debug)] #[cfg_attr(test, derive(PartialEq))] pub enum ForkAppendResult { @@ -356,7 +356,7 @@ impl> ListCache // if there's an entry at this block: // - remove reference from this entry to the previous entry - // - destroy fork starting with previous entry + // - destroy fork starting with previous entry let current_entry = match self.storage.read_entry(&ancient_block)? 
{ Some(current_entry) => current_entry, None => return Ok(()), @@ -583,12 +583,12 @@ fn read_forks>( #[cfg(test)] pub mod tests { - use runtime_primitives::testing::{Header, Block as RawBlock}; + use runtime_primitives::testing::{Header, Block as RawBlock, ExtrinsicWrapper}; use runtime_primitives::traits::Header as HeaderT; use cache::list_storage::tests::{DummyStorage, FaultyStorage, DummyTransaction}; use super::*; - type Block = RawBlock; + type Block = RawBlock>; pub fn test_id(number: u64) -> ComplexBlockId { ComplexBlockId::new(From::from(number), number) @@ -834,7 +834,7 @@ pub mod tests { // when trying to insert block @ finalized number assert!(ListCache::new(DummyStorage::new(), 1024, test_id(100)) .on_block_insert(&mut DummyTransaction::new(), test_id(99), test_id(100), Some(100), false).unwrap().is_none()); - + // when trying to insert non-final block AND it appends to the best block of unfinalized fork // AND new value is the same as in the fork' best block let mut cache = ListCache::new( diff --git a/substrate/core/client/db/src/cache/list_storage.rs b/substrate/core/client/db/src/cache/list_storage.rs index f1e1ccff19..ea3fbb94ac 100644 --- a/substrate/core/client/db/src/cache/list_storage.rs +++ b/substrate/core/client/db/src/cache/list_storage.rs @@ -23,7 +23,7 @@ use kvdb::{KeyValueDB, DBTransaction}; use client::error::{Error as ClientError, ErrorKind as ClientErrorKind, Result as ClientResult}; use codec::{Encode, Decode}; use runtime_primitives::generic::BlockId; -use runtime_primitives::traits::{Block as BlockT, NumberFor}; +use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use utils::{self, db_err, meta_keys}; use cache::{CacheItemT, ComplexBlockId}; @@ -126,7 +126,8 @@ impl DbStorage { impl Storage for DbStorage { fn read_id(&self, at: NumberFor) -> ClientResult> { - utils::read_id::(&*self.db, self.columns.hash_lookup, BlockId::Number(at)) + utils::read_header::(&*self.db, self.columns.hash_lookup, 
self.columns.header, BlockId::Number(at)) + .map(|maybe_header| maybe_header.map(|header| header.hash())) } fn read_header(&self, at: &Block::Hash) -> ClientResult> { @@ -246,7 +247,6 @@ mod meta { #[cfg(test)] pub mod tests { use std::collections::{HashMap, HashSet}; - use runtime_primitives::traits::Header as HeaderT; use super::*; pub struct FaultyStorage; diff --git a/substrate/core/client/db/src/cache/mod.rs b/substrate/core/client/db/src/cache/mod.rs index 3d3b6a22da..788ad8b61e 100644 --- a/substrate/core/client/db/src/cache/mod.rs +++ b/substrate/core/client/db/src/cache/mod.rs @@ -194,10 +194,11 @@ impl BlockchainCache for DbCacheSync { ComplexBlockId::new(hash, *header.number()) }, BlockId::Number(number) => { - let hash = utils::read_id::( + let hash = utils::read_header::( &**db, columns.hash_lookup, - BlockId::Number(number.clone())).ok()??; + columns.header, + BlockId::Number(number.clone())).ok()??.hash(); ComplexBlockId::new(hash, number) }, }; diff --git a/substrate/core/client/db/src/lib.rs b/substrate/core/client/db/src/lib.rs index 267d28729b..f35dd8a05d 100644 --- a/substrate/core/client/db/src/lib.rs +++ b/substrate/core/client/db/src/lib.rs @@ -75,7 +75,7 @@ use runtime_primitives::BuildStorage; use state_machine::backend::Backend as StateBackend; use executor::RuntimeInfo; use state_machine::{CodeExecutor, DBValue, ExecutionStrategy}; -use utils::{Meta, db_err, meta_keys, open_database, read_db, read_id, read_meta}; +use utils::{Meta, db_err, meta_keys, open_database, read_db, block_id_to_lookup_key, read_meta}; use client::LeafSet; use state_db::StateDb; pub use state_db::PruningMode; @@ -118,6 +118,7 @@ mod columns { pub const META: Option = ::utils::COLUMN_META; pub const STATE: Option = Some(1); pub const STATE_META: Option = Some(2); + /// maps hashes to lookup keys pub const HASH_LOOKUP: Option = Some(3); pub const HEADER: Option = Some(4); pub const BODY: Option = Some(5); @@ -219,15 +220,20 @@ impl client::blockchain::HeaderBackend 
for BlockchainDb Result::Number>, client::error::Error> { - self.header(BlockId::Hash(hash)).and_then(|key| match key { - Some(hdr) => Ok(Some(hdr.number().clone())), - None => Ok(None), - }) + fn number(&self, hash: Block::Hash) -> Result>, client::error::Error> { + if let Some(lookup_key) = block_id_to_lookup_key::(&*self.db, columns::HASH_LOOKUP, BlockId::Hash(hash))? { + let number = utils::lookup_key_to_number(&lookup_key)?; + Ok(Some(number)) + } else { + Ok(None) + } } - fn hash(&self, number: ::Number) -> Result, client::error::Error> { - read_id::(&*self.db, columns::HASH_LOOKUP, BlockId::Number(number)) + fn hash(&self, number: NumberFor) -> Result, client::error::Error> { + self.header(BlockId::Number(number)).and_then(|maybe_header| match maybe_header { + Some(header) => Ok(Some(header.hash().clone())), + None => Ok(None), + }) } } @@ -495,12 +501,9 @@ impl Backend { let hash = if new_canonical == number_u64 { hash } else { - read_id::( - &*self.blockchain.db, - columns::HASH_LOOKUP, - BlockId::Number(As::sa(new_canonical)) - )?.expect("existence of block with number `new_canonical` \ - implies existence of blocks with all nubmers before it; qed") + ::client::blockchain::HeaderBackend::hash(&self.blockchain, As::sa(new_canonical))? 
+ .expect("existence of block with number `new_canonical` \ + implies existence of blocks with all numbers before it; qed") }; trace!(target: "db", "Canonicalize block #{} ({:?})", new_canonical, hash); @@ -588,14 +591,6 @@ impl client::backend::Backend for Backend whe let parent_hash = *pending_block.header.parent_hash(); let number = pending_block.header.number().clone(); - transaction.put(columns::HEADER, hash.as_ref(), &pending_block.header.encode()); - if let Some(body) = pending_block.body { - transaction.put(columns::BODY, hash.as_ref(), &body.encode()); - } - if let Some(justification) = pending_block.justification { - transaction.put(columns::JUSTIFICATION, hash.as_ref(), &justification.encode()); - } - if pending_block.leaf_state.is_best() { let meta = self.blockchain.meta.read(); @@ -607,7 +602,7 @@ impl client::backend::Backend for Backend whe BlockId::Hash(parent_hash), )?; - // update block number to hash lookup entries. + // uncanonicalize for retracted in tree_route.retracted() { if retracted.hash == meta.finalized_hash { warn!("Potential safety failure: reverting finalized block {:?}", @@ -616,30 +611,94 @@ impl client::backend::Backend for Backend whe return Err(::client::error::ErrorKind::NotInFinalizedChain.into()); } - transaction.delete( - columns::HASH_LOOKUP, - &::utils::number_to_lookup_key(retracted.number) - ); + let prev_lookup_key = ::utils::number_to_lookup_key(retracted.number); + let new_lookup_key = ::utils::number_and_hash_to_lookup_key(retracted.number, retracted.hash); + + // change mapping from `number -> header` + // to `number + hash -> header` + let retracted_header = if let Some(header) = ::client::blockchain::HeaderBackend::::header(&self.blockchain, BlockId::Number(retracted.number))? 
{ + header + } else { + return Err(client::error::ErrorKind::UnknownBlock(format!("retracted {:?}", retracted)).into()); + }; + transaction.delete(columns::HEADER, &prev_lookup_key); + transaction.put(columns::HEADER, &new_lookup_key, &retracted_header.encode()); + + // if body is stored + // change mapping from `number -> body` + // to `number + hash -> body` + if let Some(retracted_body) = ::client::blockchain::Backend::::body(&self.blockchain, BlockId::Number(retracted.number))? { + transaction.delete(columns::BODY, &prev_lookup_key); + transaction.put(columns::BODY, &new_lookup_key, &retracted_body.encode()); + } + + // if justification is stored + // change mapping from `number -> justification` + // to `number + hash -> justification` + if let Some(retracted_justification) = ::client::blockchain::Backend::::justification(&self.blockchain, BlockId::Number(retracted.number))? { + transaction.delete(columns::JUSTIFICATION, &prev_lookup_key); + transaction.put(columns::JUSTIFICATION, &new_lookup_key, &retracted_justification.encode()); + } + + transaction.put(columns::HASH_LOOKUP, retracted.hash.as_ref(), &new_lookup_key); } + // canonicalize for enacted in tree_route.enacted() { - let hash: &Block::Hash = &enacted.hash; - transaction.put( - columns::HASH_LOOKUP, - &::utils::number_to_lookup_key(enacted.number), - hash.as_ref(), - ) + let prev_lookup_key = ::utils::number_and_hash_to_lookup_key(enacted.number, enacted.hash); + let new_lookup_key = ::utils::number_to_lookup_key(enacted.number); + + // change mapping from `number + hash -> header` + // to `number -> header` + let enacted_header = if let Some(header) = ::client::blockchain::HeaderBackend::::header(&self.blockchain, BlockId::Number(enacted.number))? 
{ + header + } else { + return Err(client::error::ErrorKind::UnknownBlock(format!("enacted {:?}", enacted)).into()); + }; + transaction.delete(columns::HEADER, &prev_lookup_key); + transaction.put(columns::HEADER, &new_lookup_key, &enacted_header.encode()); + + // if body is stored + // change mapping from `number + hash -> body` + // to `number -> body` + if let Some(enacted_body) = ::client::blockchain::Backend::::body(&self.blockchain, BlockId::Number(enacted.number))? { + transaction.delete(columns::BODY, &prev_lookup_key); + transaction.put(columns::BODY, &new_lookup_key, &enacted_body.encode()); + } + + // if justification is stored + // change mapping from `number + hash -> justification` + // to `number -> justification` + if let Some(enacted_justification) = ::client::blockchain::Backend::::justification(&self.blockchain, BlockId::Number(enacted.number))? { + transaction.delete(columns::JUSTIFICATION, &prev_lookup_key); + transaction.put(columns::JUSTIFICATION, &new_lookup_key, &enacted_justification.encode()); + } + + transaction.put(columns::HASH_LOOKUP, enacted.hash.as_ref(), &new_lookup_key); + } + } - transaction.put( - columns::HASH_LOOKUP, - &::utils::number_to_lookup_key(number), - hash.as_ref() - ); transaction.put(columns::META, meta_keys::BEST_BLOCK, hash.as_ref()); } + // blocks in longest chain are keyed by number + let lookup_key = if pending_block.leaf_state.is_best() { + ::utils::number_to_lookup_key(number).to_vec() + } else { + // other blocks are keyed by number + hash + ::utils::number_and_hash_to_lookup_key(number, hash) + }; + + transaction.put(columns::HEADER, &lookup_key, &pending_block.header.encode()); + if let Some(body) = pending_block.body { + transaction.put(columns::BODY, &lookup_key, &body.encode()); + } + if let Some(justification) = pending_block.justification { + transaction.put(columns::JUSTIFICATION, &lookup_key, &justification.encode()); + } + + transaction.put(columns::HASH_LOOKUP, hash.as_ref(), &lookup_key); + if number 
== Zero::zero() { transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, hash.as_ref()); transaction.put(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); @@ -732,14 +791,14 @@ impl client::backend::Backend for Backend whe match self.storage.state_db.revert_one() { Some(commit) => { apply_state_commit(&mut transaction, commit); - let removed = best.clone(); + let _removed = best.clone(); best -= As::sa(1); let header = self.blockchain.header(BlockId::Number(best))?.ok_or_else( || client::error::ErrorKind::UnknownBlock( format!("Error reverting to {}. Block header not found.", best)))?; transaction.put(columns::META, meta_keys::BEST_BLOCK, header.hash().as_ref()); - transaction.delete(columns::HASH_LOOKUP, &::utils::number_to_lookup_key(removed)); + transaction.delete(columns::HASH_LOOKUP, header.hash().as_ref()); self.storage.db.write(transaction).map_err(db_err)?; self.blockchain.update_meta(header.hash().clone(), best.clone(), true, false); self.blockchain.leaves.write().revert(header.hash().clone(), header.number().clone(), header.parent_hash().clone()); @@ -806,11 +865,11 @@ mod tests { use client::backend::Backend as BTrait; use client::backend::BlockImportOperation as Op; use client::blockchain::HeaderBackend as BlockchainHeaderBackend; - use runtime_primitives::testing::{Header, Block as RawBlock}; + use runtime_primitives::testing::{Header, Block as RawBlock, ExtrinsicWrapper}; use state_machine::{TrieMut, TrieDBMut, ChangesTrieRootsStorage, ChangesTrieStorage}; use test_client; - type Block = RawBlock; + type Block = RawBlock>; fn prepare_changes(changes: Vec<(Vec, Vec)>) -> (H256, MemoryDB) { let mut changes_root = H256::default(); diff --git a/substrate/core/client/db/src/light.rs b/substrate/core/client/db/src/light.rs index 170882ceed..49e5e85be1 100644 --- a/substrate/core/client/db/src/light.rs +++ b/substrate/core/client/db/src/light.rs @@ -34,7 +34,7 @@ use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Zero, One, As, 
NumberFor}; use cache::{DbCacheSync, DbCache, ComplexBlockId}; use utils::{meta_keys, Meta, db_err, number_to_lookup_key, open_database, - read_db, read_id, read_meta}; + read_db, block_id_to_lookup_key, read_meta}; use DatabaseSettings; pub(crate) mod columns { @@ -168,14 +168,16 @@ impl BlockchainHeaderBackend for LightStorage } fn number(&self, hash: Block::Hash) -> ClientResult::Header as HeaderT>::Number>> { - self.header(BlockId::Hash(hash)).and_then(|key| match key { - Some(hdr) => Ok(Some(hdr.number().clone())), - None => Ok(None), - }) + if let Some(lookup_key) = block_id_to_lookup_key::(&*self.db, columns::HASH_LOOKUP, BlockId::Hash(hash))? { + let number = ::utils::lookup_key_to_number(&lookup_key)?; + Ok(Some(number)) + } else { + Ok(None) + } } fn hash(&self, number: <::Header as HeaderT>::Number) -> ClientResult> { - read_id::(&*self.db, columns::HASH_LOOKUP, BlockId::Number(number)) + Ok(self.header(BlockId::Number(number))?.map(|header| header.hash().clone())) } } @@ -212,13 +214,13 @@ impl LightStorage { trace!(target: "db", "Replacing blocks [{}..{}] with CHT#{}", new_cht_start, new_cht_end, new_cht_number); while prune_block <= new_cht_end { - let id = read_id::(&*self.db, columns::HASH_LOOKUP, BlockId::Number(prune_block))?; - if let Some(hash) = id { - let lookup_key = number_to_lookup_key(prune_block); - transaction.delete(columns::HASH_LOOKUP, &lookup_key); - transaction.delete(columns::HEADER, hash.as_ref()); + if let Some(hash) = self.hash(prune_block)? { + let lookup_key = block_id_to_lookup_key::(&*self.db, columns::HASH_LOOKUP, BlockId::Number(prune_block))? + .expect("retrieved hash for `prune_block` right above. therefore retrieving lookup key must succeed. 
q.e.d."); + transaction.delete(columns::HASH_LOOKUP, hash.as_ref()); + transaction.delete(columns::HEADER, &lookup_key); } - prune_block += <::Header as HeaderT>::Number::one(); + prune_block += NumberFor::::one(); } } } @@ -242,8 +244,6 @@ impl LightBlockchainStorage for LightStorage let number = *header.number(); let parent_hash = *header.parent_hash(); - transaction.put(columns::HEADER, hash.as_ref(), &header.encode()); - if leaf_state.is_best() { // handle reorg. { @@ -263,27 +263,55 @@ impl LightBlockchainStorage for LightStorage (&retracted.number, &retracted.hash)); } - transaction.delete( - columns::HASH_LOOKUP, - &::utils::number_to_lookup_key(retracted.number) - ); + let prev_lookup_key = ::utils::number_to_lookup_key(retracted.number); + let new_lookup_key = ::utils::number_and_hash_to_lookup_key(retracted.number, retracted.hash); + + // change mapping from `number -> header` + // to `number + hash -> header` + let retracted_header = if let Some(header) = self.header(BlockId::Number(retracted.number))? { + header + } else { + return Err(::client::error::ErrorKind::UnknownBlock(format!("retracted {:?}", retracted)).into()); + }; + transaction.delete(columns::HEADER, &prev_lookup_key); + transaction.put(columns::HEADER, &new_lookup_key, &retracted_header.encode()); + + transaction.put(columns::HASH_LOOKUP, retracted.hash.as_ref(), &new_lookup_key); } for enacted in tree_route.enacted() { - let hash: &Block::Hash = &enacted.hash; - transaction.put( - columns::HASH_LOOKUP, - &::utils::number_to_lookup_key(enacted.number), - hash.as_ref(), - ) + let prev_lookup_key = ::utils::number_and_hash_to_lookup_key(enacted.number, enacted.hash); + let new_lookup_key = ::utils::number_to_lookup_key(enacted.number); + + // change mapping from `number + hash -> header` + // to `number -> header` + let enacted_header = if let Some(header) = self.header(BlockId::Number(enacted.number))? 
{ + header + } else { + return Err(::client::error::ErrorKind::UnknownBlock(format!("enacted {:?}", enacted)).into()); + }; + transaction.delete(columns::HEADER, &prev_lookup_key); + transaction.put(columns::HEADER, &new_lookup_key, &enacted_header.encode()); + + transaction.put(columns::HASH_LOOKUP, enacted.hash.as_ref(), &new_lookup_key); } } } transaction.put(columns::META, meta_keys::BEST_BLOCK, hash.as_ref()); - transaction.put(columns::HASH_LOOKUP, &number_to_lookup_key(number), hash.as_ref()); } + // blocks in longest chain are keyed by number + let lookup_key = if leaf_state.is_best() { + ::utils::number_to_lookup_key(number).to_vec() + } else { + // other blocks are keyed by number + hash + ::utils::number_and_hash_to_lookup_key(number, hash) + }; + + transaction.put(columns::HEADER, &lookup_key, &header.encode()); + transaction.put(columns::HASH_LOOKUP, hash.as_ref(), &lookup_key); + let finalized = match leaf_state { NewBlockState::Final => true, _ => false, @@ -374,10 +402,10 @@ impl LightBlockchainStorage for LightStorage #[cfg(test)] pub(crate) mod tests { use client::cht; - use runtime_primitives::testing::{H256 as Hash, Header, Block as RawBlock}; + use runtime_primitives::testing::{H256 as Hash, Header, Block as RawBlock, ExtrinsicWrapper}; use super::*; - type Block = RawBlock; + type Block = RawBlock>; fn prepare_header(parent: &Hash, number: u64, extrinsics_root: Hash) -> Header { Header { @@ -512,6 +540,7 @@ pub(crate) mod tests { prev_hash = insert_block(&db, &prev_hash, 1 + number, None); } assert_eq!(db.db.iter(columns::HEADER).count(), (1 + cht::SIZE) as usize); + assert_eq!(db.db.iter(columns::HASH_LOOKUP).count(), (1 + cht::SIZE) as usize); assert_eq!(db.db.iter(columns::CHT).count(), 0); // insert next SIZE blocks && ensure that nothing is pruned @@ -519,12 +548,14 @@ pub(crate) mod tests { prev_hash = insert_block(&db, &prev_hash, 1 + cht::SIZE + number, None); } assert_eq!(db.db.iter(columns::HEADER).count(), (1 + cht::SIZE + 
cht::SIZE) as usize); + assert_eq!(db.db.iter(columns::HASH_LOOKUP).count(), (1 + cht::SIZE + cht::SIZE) as usize); assert_eq!(db.db.iter(columns::CHT).count(), 0); // insert block #{2 * cht::SIZE + 1} && check that new CHT is created + headers of this CHT are pruned // nothing is yet finalized, so nothing is pruned. prev_hash = insert_block(&db, &prev_hash, 1 + cht::SIZE + cht::SIZE, None); assert_eq!(db.db.iter(columns::HEADER).count(), (2 + cht::SIZE + cht::SIZE) as usize); + assert_eq!(db.db.iter(columns::HASH_LOOKUP).count(), (2 + cht::SIZE + cht::SIZE) as usize); assert_eq!(db.db.iter(columns::CHT).count(), 0); // now finalize the block. @@ -533,6 +564,7 @@ pub(crate) mod tests { } db.finalize_header(BlockId::Hash(prev_hash)).unwrap(); assert_eq!(db.db.iter(columns::HEADER).count(), (1 + cht::SIZE + 1) as usize); + assert_eq!(db.db.iter(columns::HASH_LOOKUP).count(), (1 + cht::SIZE + 1) as usize); assert_eq!(db.db.iter(columns::CHT).count(), 1); assert!((0..cht::SIZE).all(|i| db.db.get(columns::HEADER, &number_to_lookup_key(1 + i)).unwrap().is_none())); } diff --git a/substrate/core/client/db/src/utils.rs b/substrate/core/client/db/src/utils.rs index b155fb87e9..b32b56bccc 100644 --- a/substrate/core/client/db/src/utils.rs +++ b/substrate/core/client/db/src/utils.rs @@ -67,10 +67,11 @@ pub struct Meta { } /// A block lookup key: used for canonical lookup from block number to hash -pub type BlockLookupKey = [u8; 4]; +pub type ShortBlockLookupKey = [u8; 4]; -/// Convert block number into lookup key (LE representation). -pub fn number_to_lookup_key(n: N) -> BlockLookupKey where N: As { +/// Convert block number into short lookup key (LE representation) for +/// blocks that are in the canonical chain. 
+pub fn number_to_lookup_key(n: N) -> ShortBlockLookupKey where N: As { let n: u64 = n.as_(); assert!(n & 0xffffffff00000000 == 0); @@ -82,6 +83,49 @@ pub fn number_to_lookup_key(n: N) -> BlockLookupKey where N: As { ] } +/// Convert number and hash into long lookup key for blocks that are +/// not in the canonical chain. +pub fn number_and_hash_to_lookup_key(number: N, hash: H) -> Vec where + N: As, + H: AsRef<[u8]> +{ + let mut lookup_key = number_to_lookup_key(number).to_vec(); + lookup_key.extend_from_slice(hash.as_ref()); + lookup_key +} + +/// Convert block lookup key into block number. +/// all block lookup keys start with the block number. +pub fn lookup_key_to_number(key: &[u8]) -> client::error::Result where N: As { + if key.len() < 4 { + return Err(client::error::ErrorKind::Backend("Invalid block key".into()).into()); + } + Ok((key[0] as u64) << 24 + | (key[1] as u64) << 16 + | (key[2] as u64) << 8 + | (key[3] as u64)).map(As::sa) +} + +/// Convert block id to block lookup key. +/// block lookup key is the DB-key header, block and justification are stored under. +/// looks up lookup key by hash from DB as necessary. +pub fn block_id_to_lookup_key( + db: &KeyValueDB, + hash_lookup_col: Option, + id: BlockId +) -> Result>, client::error::Error> where + Block: BlockT, +{ + match id { + // numbers are solely looked up in canonical chain + BlockId::Number(n) => Ok(Some(number_to_lookup_key(n).to_vec())), + BlockId::Hash(h) => db.get(hash_lookup_col, h.as_ref()).map(|v| + v.map(|v| { v.into_vec() }) + ).map_err(db_err), + } +} + + /// Maps database error to client error pub fn db_err(err: io::Error) -> client::error::Error { use std::error::Error; @@ -113,33 +157,12 @@ pub fn open_database(config: &DatabaseSettings, col_meta: Option, db_type: Ok(Arc::new(db)) } -/// Convert block id to block key, looking up canonical hash by number from DB as necessary. 
-pub fn read_id(db: &KeyValueDB, col_index: Option, id: BlockId) -> Result, client::error::Error> - where - Block: BlockT, -{ - match id { - BlockId::Hash(h) => Ok(Some(h)), - BlockId::Number(n) => db.get(col_index, &number_to_lookup_key(n)).map(|v| - v.map(|v| { - let mut h = ::default(); - { - let h = h.as_mut(); - let len = ::std::cmp::min(v.len(), h.len()); - h.as_mut().copy_from_slice(&v[..len]); - } - h - }) - ).map_err(db_err), - } -} - /// Read database column entry for the given block. pub fn read_db(db: &KeyValueDB, col_index: Option, col: Option, id: BlockId) -> client::error::Result> where Block: BlockT, { - read_id(db, col_index, id).and_then(|key| match key { + block_id_to_lookup_key(db, col_index, id).and_then(|key| match key { Some(key) => db.get(col, key.as_ref()).map_err(db_err), None => Ok(None), }) diff --git a/substrate/core/client/src/cht.rs b/substrate/core/client/src/cht.rs index 54978b7932..fd8e07c3dd 100644 --- a/substrate/core/client/src/cht.rs +++ b/substrate/core/client/src/cht.rs @@ -89,7 +89,7 @@ pub fn build_proof( { let transaction = build_pairs::(cht_size, cht_num, hashes)? .into_iter() - .map(|(k, v)| (k, Some(v))) + .map(|(k, v)| (None, k, Some(v))) .collect::>(); let storage = InMemoryState::::default().update(transaction); let (value, proof) = prove_read(storage, &encode_cht_key(block_num)).ok()?; @@ -205,7 +205,7 @@ pub fn decode_cht_value(value: &[u8]) -> Option { 32 => Some(H256::from_slice(&value[0..32])), _ => None, } - + } #[cfg(test)] diff --git a/substrate/core/client/src/client.rs b/substrate/core/client/src/client.rs index 9122e1692c..2567e43a95 100644 --- a/substrate/core/client/src/client.rs +++ b/substrate/core/client/src/client.rs @@ -574,7 +574,7 @@ impl Client where for tx in extrinsics { let tx = api::TaggedTransactionQueue::validate_transaction(self, &id, &tx)?; match tx { - TransactionValidity::Valid(_, _, mut provides, ..) => { + TransactionValidity::Valid { mut provides, .. 
} => { tags.append(&mut provides); }, // silently ignore invalid extrinsics, @@ -1128,12 +1128,12 @@ impl api::BlockBuilder for Client where self.call_api_at(at, "inherent_extrinsics", &(inherent)) } - fn check_inherents( + fn check_inherents( &self, at: &BlockId, block: &Block, data: &InherentData - ) -> Result, Self::Error> { + ) -> Result, Self::Error> { self.call_api_at(at, "check_inherents", &(block, data)) } diff --git a/substrate/core/client/src/light/backend.rs b/substrate/core/client/src/light/backend.rs index 9ee420140a..e83640aa6c 100644 --- a/substrate/core/client/src/light/backend.rs +++ b/substrate/core/client/src/light/backend.rs @@ -201,7 +201,7 @@ impl StateBackend for OnDemandState S: BlockchainStorage, F: Fetcher, H: Hasher, - + { type Error = ClientError; type Transaction = (); @@ -227,15 +227,32 @@ impl StateBackend for OnDemandState .into_future().wait() } + fn child_storage(&self, _storage_key: &[u8], _key: &[u8]) -> ClientResult>> { + Err(ClientErrorKind::NotAvailableOnLightClient.into()) + } + fn for_keys_with_prefix(&self, _prefix: &[u8], _action: A) { // whole state is not available on light node } + fn for_keys_in_child_storage(&self, _storage_key: &[u8], _action: A) { + // whole state is not available on light node + } + fn storage_root(&self, _delta: I) -> (H::Out, Self::Transaction) - where I: IntoIterator, Option>)> { + where + I: IntoIterator, Option>)> + { (H::Out::default(), ()) } + fn child_storage_root(&self, _key: &[u8], _delta: I) -> (Vec, Self::Transaction) + where + I: IntoIterator, Option>)> + { + (H::Out::default().as_ref().to_vec(), ()) + } + fn pairs(&self) -> Vec<(Vec, Vec)> { // whole state is not available on light node Vec::new() diff --git a/substrate/core/client/src/notifications.rs b/substrate/core/client/src/notifications.rs index 320ede9491..1856932ca9 100644 --- a/substrate/core/client/src/notifications.rs +++ b/substrate/core/client/src/notifications.rs @@ -179,11 +179,10 @@ impl StorageNotifications { 
#[cfg(test)] mod tests { - use runtime_primitives::testing::{H256 as Hash, Block as RawBlock}; + use runtime_primitives::testing::{H256 as Hash, Block as RawBlock, ExtrinsicWrapper}; use super::*; use futures::Stream; - #[cfg(test)] impl From)>> for StorageChangeSet { fn from(changes: Vec<(StorageKey, Option)>) -> Self { @@ -201,7 +200,7 @@ mod tests { } } - type Block = RawBlock; + type Block = RawBlock>; #[test] fn triggering_change_should_notify_wildcard_listeners() { diff --git a/substrate/core/executor/src/wasm_executor.rs b/substrate/core/executor/src/wasm_executor.rs index 2d626bf1cf..56624060e7 100644 --- a/substrate/core/executor/src/wasm_executor.rs +++ b/substrate/core/executor/src/wasm_executor.rs @@ -181,6 +181,46 @@ impl_function_executor!(this: FunctionExecutor<'e, E>, this.ext.set_storage(key, value); Ok(()) }, + ext_set_child_storage(storage_key_data: *const u8, storage_key_len: u32, key_data: *const u8, key_len: u32, value_data: *const u8, value_len: u32) => { + let storage_key = this.memory.get(storage_key_data, storage_key_len as usize).map_err(|_| UserError("Invalid attempt to determine storage_key in ext_set_child_storage"))?; + let key = this.memory.get(key_data, key_len as usize).map_err(|_| UserError("Invalid attempt to determine key in ext_set_child_storage"))?; + let value = this.memory.get(value_data, value_len as usize).map_err(|_| UserError("Invalid attempt to determine value in ext_set_child_storage"))?; + if let Some(_preimage) = this.hash_lookup.get(&key) { + debug_trace!( + target: "wasm-trace", "*** Setting child storage: {} -> %{} -> {} [k={}]", + ::primitives::hexdisplay::ascii_format(&storage_key), + ::primitives::hexdisplay::ascii_format(&_preimage), + HexDisplay::from(&value), + HexDisplay::from(&key) + ); + } else { + debug_trace!( + target: "wasm-trace", "*** Setting child storage: {} -> {} -> {} [k={}]", + ::primitives::hexdisplay::ascii_format(&storage_key), + ::primitives::hexdisplay::ascii_format(&key), + 
HexDisplay::from(&value), + HexDisplay::from(&key) + ); + } + this.ext.set_child_storage(storage_key, key, value); + Ok(()) + }, + ext_clear_child_storage(storage_key_data: *const u8, storage_key_len: u32, key_data: *const u8, key_len: u32) => { + let storage_key = this.memory.get( + storage_key_data, + storage_key_len as usize + ).map_err(|_| UserError("Invalid attempt to determine storage_key in ext_clear_child_storage"))?; + let key = this.memory.get(key_data, key_len as usize).map_err(|_| UserError("Invalid attempt to determine key in ext_clear_child_storage"))?; + debug_trace!(target: "wasm-trace", "*** Clearing child storage: {} -> {} [k={}]", + ::primitives::hexdisplay::ascii_format(&storage_key), + if let Some(_preimage) = this.hash_lookup.get(&key) { + format!("%{}", ::primitives::hexdisplay::ascii_format(&_preimage)) + } else { + format!(" {}", ::primitives::hexdisplay::ascii_format(&key)) + }, HexDisplay::from(&key)); + this.ext.clear_child_storage(&storage_key, &key); + Ok(()) + }, ext_clear_storage(key_data: *const u8, key_len: u32) => { let key = this.memory.get(key_data, key_len as usize).map_err(|_| UserError("Invalid attempt to determine key in ext_clear_storage"))?; debug_trace!(target: "wasm-trace", "*** Clearing storage: {} [k={}]", @@ -196,14 +236,33 @@ impl_function_executor!(this: FunctionExecutor<'e, E>, let key = this.memory.get(key_data, key_len as usize).map_err(|_| UserError("Invalid attempt to determine key in ext_exists_storage"))?; Ok(if this.ext.exists_storage(&key) { 1 } else { 0 }) }, + ext_exists_child_storage(storage_key_data: *const u8, storage_key_len: u32, key_data: *const u8, key_len: u32) -> u32 => { + let storage_key = this.memory.get( + storage_key_data, + storage_key_len as usize + ).map_err(|_| UserError("Invalid attempt to determine storage_key in ext_exists_child_storage"))?; + let key = this.memory.get(key_data, key_len as usize).map_err(|_| UserError("Invalid attempt to determine key in ext_exists_child_storage"))?; 
+ Ok(if this.ext.exists_child_storage(&storage_key, &key) { 1 } else { 0 }) + }, ext_clear_prefix(prefix_data: *const u8, prefix_len: u32) => { let prefix = this.memory.get(prefix_data, prefix_len as usize).map_err(|_| UserError("Invalid attempt to determine prefix in ext_clear_prefix"))?; this.ext.clear_prefix(&prefix); Ok(()) }, + ext_kill_child_storage(storage_key_data: *const u8, storage_key_len: u32) => { + let storage_key = this.memory.get( + storage_key_data, + storage_key_len as usize + ).map_err(|_| UserError("Invalid attempt to determine storage_key in ext_kill_child_storage"))?; + this.ext.kill_child_storage(&storage_key); + Ok(()) + }, // return 0 and place u32::max_value() into written_out if no value exists for the key. ext_get_allocated_storage(key_data: *const u8, key_len: u32, written_out: *mut u32) -> *mut u8 => { - let key = this.memory.get(key_data, key_len as usize).map_err(|_| UserError("Invalid attempt to determine key in ext_get_allocated_storage"))?; + let key = this.memory.get( + key_data, + key_len as usize + ).map_err(|_| UserError("Invalid attempt to determine key in ext_get_allocated_storage"))?; let maybe_value = this.ext.storage(&key); debug_trace!(target: "wasm-trace", "*** Getting storage: {} == {} [k={}]", @@ -213,9 +272,9 @@ impl_function_executor!(this: FunctionExecutor<'e, E>, format!(" {}", ::primitives::hexdisplay::ascii_format(&key)) }, if let Some(ref b) = maybe_value { - format!("{}", HexDisplay::from(b)) + &format!("{}", HexDisplay::from(b)) } else { - "".to_owned() + "" }, HexDisplay::from(&key) ); @@ -232,6 +291,45 @@ impl_function_executor!(this: FunctionExecutor<'e, E>, Ok(0) } }, + // return 0 and place u32::max_value() into written_out if no value exists for the key. 
+ ext_get_allocated_child_storage(storage_key_data: *const u8, storage_key_len: u32, key_data: *const u8, key_len: u32, written_out: *mut u32) -> *mut u8 => { + let storage_key = this.memory.get( + storage_key_data, + storage_key_len as usize + ).map_err(|_| UserError("Invalid attempt to determine storage_key in ext_get_allocated_child_storage"))?; + let key = this.memory.get( + key_data, + key_len as usize + ).map_err(|_| UserError("Invalid attempt to determine key in ext_get_allocated_child_storage"))?; + let maybe_value = this.ext.child_storage(&storage_key, &key); + + debug_trace!(target: "wasm-trace", "*** Getting child storage: {} -> {} == {} [k={}]", + ::primitives::hexdisplay::ascii_format(&storage_key), + if let Some(_preimage) = this.hash_lookup.get(&key) { + format!("%{}", ::primitives::hexdisplay::ascii_format(&_preimage)) + } else { + format!(" {}", ::primitives::hexdisplay::ascii_format(&key)) + }, + if let Some(ref b) = maybe_value { + &format!("{}", HexDisplay::from(b)) + } else { + "" + }, + HexDisplay::from(&key) + ); + + if let Some(value) = maybe_value { + let offset = this.heap.allocate(value.len() as u32) as u32; + this.memory.set(offset, &value).map_err(|_| UserError("Invalid attempt to set memory in ext_get_allocated_child_storage"))?; + this.memory.write_primitive(written_out, value.len() as u32) + .map_err(|_| UserError("Invalid attempt to write written_out in ext_get_allocated_child_storage"))?; + Ok(offset) + } else { + this.memory.write_primitive(written_out, u32::max_value()) + .map_err(|_| UserError("Invalid attempt to write failed written_out in ext_get_allocated_child_storage"))?; + Ok(0) + } + }, // return u32::max_value() if no value exists for the key. 
ext_get_storage_into(key_data: *const u8, key_len: u32, value_data: *mut u8, value_len: u32, value_offset: u32) -> u32 => { let key = this.memory.get(key_data, key_len as usize).map_err(|_| UserError("Invalid attempt to get key in ext_get_storage_into"))?; @@ -243,9 +341,9 @@ impl_function_executor!(this: FunctionExecutor<'e, E>, format!(" {}", ::primitives::hexdisplay::ascii_format(&key)) }, if let Some(ref b) = maybe_value { - format!("{}", HexDisplay::from(b)) + &format!("{}", HexDisplay::from(b)) } else { - "".to_owned() + "" }, HexDisplay::from(&key) ); @@ -259,11 +357,61 @@ impl_function_executor!(this: FunctionExecutor<'e, E>, Ok(u32::max_value()) } }, + // return u32::max_value() if no value exists for the key. + ext_get_child_storage_into(storage_key_data: *const u8, storage_key_len: u32, key_data: *const u8, key_len: u32, value_data: *mut u8, value_len: u32, value_offset: u32) -> u32 => { + let storage_key = this.memory.get( + storage_key_data, + storage_key_len as usize + ).map_err(|_| UserError("Invalid attempt to determine storage_key in ext_get_child_storage_into"))?; + let key = this.memory.get( + key_data, + key_len as usize + ).map_err(|_| UserError("Invalid attempt to get key in ext_get_child_storage_into"))?; + let maybe_value = this.ext.child_storage(&storage_key, &key); + debug_trace!(target: "wasm-trace", "*** Getting storage: {} -> {} == {} [k={}]", + ::primitives::hexdisplay::ascii_format(&storage_key), + if let Some(_preimage) = this.hash_lookup.get(&key) { + format!("%{}", ::primitives::hexdisplay::ascii_format(&_preimage)) + } else { + format!(" {}", ::primitives::hexdisplay::ascii_format(&key)) + }, + if let Some(ref b) = maybe_value { + &format!("{}", HexDisplay::from(b)) + } else { + "" + }, + HexDisplay::from(&key) + ); + + if let Some(value) = maybe_value { + let value = &value[value_offset as usize..]; + let written = ::std::cmp::min(value_len as usize, value.len()); + this.memory.set(value_data, &value[..written]).map_err(|_| 
UserError("Invalid attempt to set value in ext_get_child_storage_into"))?; + Ok(written as u32) + } else { + Ok(u32::max_value()) + } + }, ext_storage_root(result: *mut u8) => { let r = this.ext.storage_root(); this.memory.set(result, r.as_ref()).map_err(|_| UserError("Invalid attempt to set memory in ext_storage_root"))?; Ok(()) }, + ext_child_storage_root(storage_key_data: *const u8, storage_key_len: u32, written_out: *mut u32) -> *mut u8 => { + let storage_key = this.memory.get(storage_key_data, storage_key_len as usize).map_err(|_| UserError("Invalid attempt to determine storage_key in ext_child_storage_root"))?; + let r = this.ext.child_storage_root(&storage_key); + if let Some(value) = r { + let offset = this.heap.allocate(value.len() as u32) as u32; + this.memory.set(offset, &value).map_err(|_| UserError("Invalid attempt to set memory in ext_child_storage_root"))?; + this.memory.write_primitive(written_out, value.len() as u32) + .map_err(|_| UserError("Invalid attempt to write written_out in ext_child_storage_root"))?; + Ok(offset) + } else { + this.memory.write_primitive(written_out, u32::max_value()) + .map_err(|_| UserError("Invalid attempt to write failed written_out in ext_child_storage_root"))?; + Ok(0) + } + }, ext_storage_changes_root(block: u64, result: *mut u8) -> u32 => { let r = this.ext.storage_changes_root(block); if let Some(ref r) = r { @@ -300,9 +448,9 @@ impl_function_executor!(this: FunctionExecutor<'e, E>, let hashed_key = twox_128(&key); debug_trace!(target: "xxhash", "XXhash: {} -> {}", if let Ok(_skey) = ::std::str::from_utf8(&key) { - _skey.to_owned() + _skey } else { - format!("{}", HexDisplay::from(&key)) + &format!("{}", HexDisplay::from(&key)) }, HexDisplay::from(&hashed_key) ); diff --git a/substrate/core/network-libp2p/src/custom_proto.rs b/substrate/core/network-libp2p/src/custom_proto.rs index dcf5aaf4e1..a2680b056e 100644 --- a/substrate/core/network-libp2p/src/custom_proto.rs +++ 
b/substrate/core/network-libp2p/src/custom_proto.rs @@ -161,7 +161,7 @@ where TSubstream: AsyncRead + AsyncWrite, // Note that `inner` is wrapped in a `Fuse`, therefore we can poll it forever. loop { match self.inner.poll()? { - Async::Ready(Some(mut data)) => + Async::Ready(Some(data)) => return Ok(Async::Ready(Some(data.freeze()))), Async::Ready(None) => if !self.requires_poll_complete && self.send_queue.is_empty() { diff --git a/substrate/core/network/src/blocks.rs b/substrate/core/network/src/blocks.rs index 7cc37624fc..db4c38af45 100644 --- a/substrate/core/network/src/blocks.rs +++ b/substrate/core/network/src/blocks.rs @@ -195,10 +195,10 @@ impl BlockCollection { mod test { use super::{BlockCollection, BlockData, BlockRangeState}; use message; - use runtime_primitives::testing::Block as RawBlock; + use runtime_primitives::testing::{Block as RawBlock, ExtrinsicWrapper}; use primitives::H256; - type Block = RawBlock; + type Block = RawBlock>; fn is_empty(bc: &BlockCollection) -> bool { bc.blocks.is_empty() && diff --git a/substrate/core/network/src/consensus_gossip.rs b/substrate/core/network/src/consensus_gossip.rs index b84d8d437f..0ecb0e5e68 100644 --- a/substrate/core/network/src/consensus_gossip.rs +++ b/substrate/core/network/src/consensus_gossip.rs @@ -310,11 +310,11 @@ impl Specialization for ConsensusGossip where #[cfg(test)] mod tests { - use runtime_primitives::testing::{H256, Block as RawBlock}; + use runtime_primitives::testing::{H256, Block as RawBlock, ExtrinsicWrapper}; use std::time::Instant; use super::*; - type Block = RawBlock; + type Block = RawBlock>; #[test] fn collects_garbage() { diff --git a/substrate/core/network/src/protocol.rs b/substrate/core/network/src/protocol.rs index b5d49272a0..a8edaaceb6 100644 --- a/substrate/core/network/src/protocol.rs +++ b/substrate/core/network/src/protocol.rs @@ -516,8 +516,8 @@ impl, H: ExHashT> Protocol { for (who, ref mut peer) in peers.iter_mut() { let (hashes, to_send): (Vec<_>, Vec<_>) = 
extrinsics .iter() - .cloned() .filter(|&(ref hash, _)| peer.known_extrinsics.insert(hash.clone())) + .cloned() .unzip(); if !to_send.is_empty() { diff --git a/substrate/core/primitives/src/storage.rs b/substrate/core/primitives/src/storage.rs index 253713f738..f3b22294bd 100644 --- a/substrate/core/primitives/src/storage.rs +++ b/substrate/core/primitives/src/storage.rs @@ -72,4 +72,13 @@ pub mod well_known_keys { /// Changes trie configuration is stored under this key. pub const CHANGES_TRIE_CONFIG: &'static [u8] = b":changes_trie"; + + /// Prefix of child storage keys. + pub const CHILD_STORAGE_KEY_PREFIX: &'static [u8] = b":child_storage:"; + + /// Whether a key is a child storage key. + pub fn is_child_storage_key(key: &[u8]) -> bool { + key.starts_with(CHILD_STORAGE_KEY_PREFIX) + } + } diff --git a/substrate/core/rpc/src/author/mod.rs b/substrate/core/rpc/src/author/mod.rs index cd84f3947b..cbdb6f0b22 100644 --- a/substrate/core/rpc/src/author/mod.rs +++ b/substrate/core/rpc/src/author/mod.rs @@ -127,7 +127,7 @@ impl AuthorApi, BlockHash

, ExtrinsicFor

, Vec Result>> { - Ok(self.pool.all(usize::max_value())) + Ok(self.pool.ready().map(|tx| tx.data.clone()).collect()) } fn watch_extrinsic(&self, _metadata: Self::Metadata, subscriber: pubsub::Subscriber, BlockHash

>>, xt: Bytes) { diff --git a/substrate/core/service/src/components.rs b/substrate/core/service/src/components.rs index 50d27845e2..a30fb9940e 100644 --- a/substrate/core/service/src/components.rs +++ b/substrate/core/service/src/components.rs @@ -16,7 +16,6 @@ //! Substrate service components. -use std::fmt; use std::sync::Arc; use std::marker::PhantomData; use std::ops::Deref; @@ -39,7 +38,7 @@ use primitives::{Blake2Hasher}; pub type NetworkService = network::Service< ::Block, ::NetworkProtocol, - ::ExtrinsicHash, + <::Block as BlockT>::Hash, >; /// Code executor type for a factory. @@ -121,16 +120,14 @@ impl RuntimeGenesis for T {} pub trait ServiceFactory: 'static + Sized { /// Block type. type Block: BlockT; - /// Extrinsic hash type. - type ExtrinsicHash: ::std::hash::Hash + Eq + Copy + fmt::Debug + fmt::LowerHex + Serialize + DeserializeOwned + ::std::str::FromStr + Send + Sync + Default + 'static; /// Network protocol extensions. type NetworkProtocol: network::specialization::Specialization; /// Chain runtime. type RuntimeDispatch: NativeExecutionDispatch + Send + Sync + 'static; /// Extrinsic pool backend type for the full client. - type FullTransactionPoolApi: txpool::ChainApi + Send + 'static; + type FullTransactionPoolApi: txpool::ChainApi::Hash, Block = Self::Block> + Send + 'static; /// Extrinsic pool backend type for the light client. - type LightTransactionPoolApi: txpool::ChainApi + 'static; + type LightTransactionPoolApi: txpool::ChainApi::Hash, Block = Self::Block> + 'static; /// Genesis configuration for the runtime. type Genesis: RuntimeGenesis; /// Other configuration for service members. @@ -202,7 +199,7 @@ pub trait Components: 'static { type Executor: 'static + client::CallExecutor, Blake2Hasher> + Send + Sync; /// Extrinsic pool type. 
type TransactionPoolApi: 'static + txpool::ChainApi< - Hash = ::ExtrinsicHash, + Hash = <::Block as BlockT>::Hash, Block = FactoryBlock >; diff --git a/substrate/core/service/src/lib.rs b/substrate/core/service/src/lib.rs index 7ad0a4dbc1..848c96ae90 100644 --- a/substrate/core/service/src/lib.rs +++ b/substrate/core/service/src/lib.rs @@ -59,8 +59,9 @@ pub mod chain_ops; use std::io; use std::net::SocketAddr; -use std::sync::Arc; use std::collections::HashMap; +#[doc(hidden)] +pub use std::{ops::Deref, result::Result, sync::Arc}; use futures::prelude::*; use parking_lot::Mutex; use keystore::Store as Keystore; @@ -68,7 +69,8 @@ use client::BlockchainEvents; use runtime_primitives::traits::{Header, As}; use runtime_primitives::generic::BlockId; use exit_future::Signal; -use tokio::runtime::TaskExecutor; +#[doc(hidden)] +pub use tokio::runtime::TaskExecutor; use substrate_executor::NativeExecutor; use codec::{Encode, Decode}; @@ -393,13 +395,13 @@ impl TransactionPoolAdapter { impl network::TransactionPool, ComponentBlock> for TransactionPoolAdapter { fn transactions(&self) -> Vec<(ComponentExHash, ComponentExtrinsic)> { - self.pool.ready(|pending| pending + self.pool.ready() .map(|t| { let hash = t.hash.clone(); let ex: ComponentExtrinsic = t.data.clone(); (hash, ex) }) - .collect()) + .collect() } fn import(&self, transaction: &ComponentExtrinsic) -> Option> { @@ -438,3 +440,163 @@ impl network::TransactionPool, ComponentBlock< self.pool.on_broadcasted(propagations) } } + +/// Creates a simple `Service` implementation. +/// This `Service` just holds an instance to a `service::Service` and implements `Deref`. +/// It also provides a `new` function that takes a `config` and a `TaskExecutor`. +#[macro_export] +macro_rules! 
construct_simple_service { + ( + $name: ident + ) => { + pub struct $name { + inner: $crate::Service, + } + + impl $name { + fn new( + config: FactoryFullConfiguration, + executor: $crate::TaskExecutor + ) -> $crate::Result { + Ok( + Self { + inner: $crate::Service::new(config, executor)? + } + ) + } + } + + impl $crate::Deref for $name { + type Target = $crate::Service; + + fn deref(&self) -> &Self::Target { + &self.inner + } + } + } +} + +/// Constructs a service factory with the given name that implements the `ServiceFactory` trait. +/// The required parameters are required to be given in the exact order. Some parameters are followed +/// by `{}` blocks. These blocks are required and used to initialize the given parameter. +/// In these block it is required to write a closure that takes the same number of arguments, +/// the corresponding function in the `ServiceFactory` trait provides. +/// +/// # Example +/// +/// ```nocompile +/// construct_service_factory! { +/// struct Factory { +/// // Declare the block type +/// Block = Block, +/// // Declare the network protocol and give an initializer. +/// NetworkProtocol = NodeProtocol { |config| Ok(NodeProtocol::new()) }, +/// RuntimeDispatch = node_executor::Executor, +/// FullTransactionPoolApi = transaction_pool::ChainApi, FullExecutor, Block> +/// { |config, client| Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) }, +/// LightTransactionPoolApi = transaction_pool::ChainApi, LightExecutor, Block> +/// { |config, client| Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) }, +/// Genesis = GenesisConfig, +/// Configuration = (), +/// FullService = Service> +/// { |config, executor| Service::>::new(config, executor) }, +/// LightService = Service> +/// { |config, executor| Service::>::new(config, executor) }, +/// // Declare the import queue. The import queue is special as it takes two initializers. 
+/// // The first one is for the initializing the full import queue and the second for the +/// // light import queue. +/// ImportQueue = BasicQueue +/// { |_, _| Ok(BasicQueue::new(Arc::new(NoneVerifier {}))) } +/// { |_, _| Ok(BasicQueue::new(Arc::new(NoneVerifier {}))) }, +/// } +/// } +/// ``` +#[macro_export] +macro_rules! construct_service_factory { + ( + $(#[$attr:meta])* + struct $name:ident { + Block = $block:ty, + NetworkProtocol = $protocol:ty { $( $protocol_init:tt )* }, + RuntimeDispatch = $dispatch:ty, + FullTransactionPoolApi = $full_transaction:ty { $( $full_transaction_init:tt )* }, + LightTransactionPoolApi = $light_transaction:ty { $( $light_transaction_init:tt )* }, + Genesis = $genesis:ty, + Configuration = $config:ty, + FullService = $full_service:ty { $( $full_service_init:tt )* }, + LightService = $light_service:ty { $( $light_service_init:tt )* }, + ImportQueue = $import_queue:ty + { $( $full_import_queue_init:tt )* } + { $( $light_import_queue_init:tt )* }, + } + ) => { + $( #[$attr] )* + pub struct $name {} + + #[allow(unused_variables)] + impl $crate::ServiceFactory for $name { + type Block = $block; + type NetworkProtocol = $protocol; + type RuntimeDispatch = $dispatch; + type FullTransactionPoolApi = $full_transaction; + type LightTransactionPoolApi = $light_transaction; + type Genesis = $genesis; + type Configuration = $config; + type FullService = $full_service; + type LightService = $light_service; + type ImportQueue = $import_queue; + + fn build_full_transaction_pool( + config: $crate::TransactionPoolOptions, + client: $crate::Arc<$crate::FullClient> + ) -> $crate::Result<$crate::TransactionPool, $crate::Error> + { + ( $( $full_transaction_init )* ) (config, client) + } + + fn build_light_transaction_pool( + config: $crate::TransactionPoolOptions, + client: $crate::Arc<$crate::LightClient> + ) -> $crate::Result<$crate::TransactionPool, $crate::Error> + { + ( $( $light_transaction_init )* ) (config, client) + } + + fn 
build_network_protocol(config: &$crate::FactoryFullConfiguration) + -> $crate::Result + { + ( $( $protocol_init )* ) (config) + } + + fn build_full_import_queue( + config: &$crate::FactoryFullConfiguration, + client: $crate::Arc<$crate::FullClient>, + ) -> $crate::Result { + ( $( $full_import_queue_init )* ) (config, client) + } + + fn build_light_import_queue( + config: &FactoryFullConfiguration, + client: Arc<$crate::LightClient>, + ) -> Result { + ( $( $light_import_queue_init )* ) (config, client) + } + + fn new_light( + config: $crate::FactoryFullConfiguration, + executor: $crate::TaskExecutor + ) -> $crate::Result + { + ( $( $light_service_init )* ) (config, executor) + } + + fn new_full( + config: $crate::FactoryFullConfiguration, + executor: $crate::TaskExecutor + ) -> Result + { + ( $( $full_service_init )* ) (config, executor) + } + } + } +} diff --git a/substrate/core/service/test/src/lib.rs b/substrate/core/service/test/src/lib.rs index 4f51c25953..31ac007c4f 100644 --- a/substrate/core/service/test/src/lib.rs +++ b/substrate/core/service/test/src/lib.rs @@ -185,7 +185,7 @@ pub fn connectivity(spec: FactoryChainSpec) { { let mut network = TestNet::::new(&temp, spec.clone(), NUM_NODES, 0, vec![], 30400); info!("Checking star topology"); - let first_address = network.full_nodes[0].1.network().node_id().unwrap(); + let first_address = network.full_nodes[0].1.network().node_id().expect("No node address"); for (_, service) in network.full_nodes.iter().skip(1) { service.network().add_reserved_peer(first_address.clone()).expect("Error adding reserved peer"); } @@ -200,10 +200,10 @@ pub fn connectivity(spec: FactoryChainSpec) { { let mut network = TestNet::::new(&temp, spec, NUM_NODES as u32, 0, vec![], 30400); info!("Checking linked topology"); - let mut address = network.full_nodes[0].1.network().node_id().unwrap(); + let mut address = network.full_nodes[0].1.network().node_id().expect("No node address"); for (_, service) in network.full_nodes.iter().skip(1) 
{ service.network().add_reserved_peer(address.clone()).expect("Error adding reserved peer"); - address = service.network().node_id().unwrap(); + address = service.network().node_id().expect("No node address"); } network.run_until_all_full(|_index, service| { service.network().status().num_peers == NUM_NODES as usize - 1 @@ -247,7 +247,7 @@ where let best_block = BlockId::number(first_service.client().info().unwrap().chain.best_number); first_service.transaction_pool().submit_one(&best_block, extrinsic_factory(&first_service)).unwrap(); network.run_until_all_full(|_index, service| - service.transaction_pool().all(usize::max_value()).len() == 1 + service.transaction_pool().ready().count() == 1 ); } diff --git a/substrate/core/sr-api/Cargo.toml b/substrate/core/sr-api/Cargo.toml index 2d1197613d..2877dfa919 100644 --- a/substrate/core/sr-api/Cargo.toml +++ b/substrate/core/sr-api/Cargo.toml @@ -5,7 +5,6 @@ authors = ["Parity Technologies "] [dependencies] parity-codec = { version = "2.1", default-features = false } -parity-codec-derive = { version = "2.1", default-features = false } sr-std = { path = "../sr-std", default-features = false } sr-primitives = { path = "../sr-primitives", default-features = false } sr-version = { path = "../sr-version", default-features = false } @@ -15,7 +14,6 @@ default = ["std"] std = [ "sr-std/std", "parity-codec/std", - "parity-codec-derive/std", "sr-primitives/std", "sr-version/std", ] diff --git a/substrate/core/sr-api/src/lib.rs b/substrate/core/sr-api/src/lib.rs index f338934f0d..2021e2f2a0 100644 --- a/substrate/core/sr-api/src/lib.rs +++ b/substrate/core/sr-api/src/lib.rs @@ -22,8 +22,6 @@ extern crate sr_std as rstd; extern crate sr_primitives as primitives; #[doc(hidden)] pub extern crate parity_codec as codec; -#[macro_use] -extern crate parity_codec_derive; extern crate sr_version as runtime_version; #[doc(hidden)] @@ -429,17 +427,6 @@ macro_rules! decl_apis { }; } -//TODO: Move into runtime! 
-#[derive(Encode)] -#[cfg_attr(feature = "std", derive(Debug, Decode))] -pub enum BlockBuilderError { - #[cfg(not(feature = "std"))] - Generic(&'static str), - #[cfg(feature = "std")] - Generic(String), - TimestampInFuture(u64), -} - decl_apis! { /// The `Core` api trait that is mandantory for each runtime. pub trait Core { @@ -482,7 +469,7 @@ decl_apis! { /// Generate inherent extrinsics. fn inherent_extrinsics(inherent: InherentExtrinsic) -> Vec; /// Check that the inherents are valid. - fn check_inherents(block: Block, data: InherentData) -> Result<(), BlockBuilderError>; + fn check_inherents(block: Block, data: InherentData) -> Result<(), Error>; /// Generate a random seed. fn random_seed() -> ::Hash; } diff --git a/substrate/core/sr-io/with_std.rs b/substrate/core/sr-io/with_std.rs index 0522dd23ac..ccadb89efc 100644 --- a/substrate/core/sr-io/with_std.rs +++ b/substrate/core/sr-io/with_std.rs @@ -47,6 +47,12 @@ pub fn storage(key: &[u8]) -> Option> { .expect("storage cannot be called outside of an Externalities-provided environment.") } +/// Get `key` from child storage and return a `Vec`, empty if there's a problem. +pub fn child_storage(storage_key: &[u8], key: &[u8]) -> Option> { + ext::with(|ext| ext.child_storage(storage_key, key).map(|s| s.to_vec())) + .expect("storage cannot be called outside of an Externalities-provided environment.") +} + /// Get `key` from storage, placing the value into `value_out` (as much of it as possible) and return /// the number of bytes that the entry in storage had beyond the offset or None if the storage entry /// doesn't exist at all. 
Note that if the buffer is smaller than the storage entry length, the returned @@ -55,7 +61,20 @@ pub fn read_storage(key: &[u8], value_out: &mut [u8], value_offset: usize) -> Op ext::with(|ext| ext.storage(key).map(|value| { let value = &value[value_offset..]; let written = ::std::cmp::min(value.len(), value_out.len()); - value_out[0..written].copy_from_slice(&value[0..written]); + value_out[..written].copy_from_slice(&value[..written]); + value.len() + })).expect("read_storage cannot be called outside of an Externalities-provided environment.") +} + +/// Get `key` from child storage, placing the value into `value_out` (as much of it as possible) and return +/// the number of bytes that the entry in storage had beyond the offset or None if the storage entry +/// doesn't exist at all. Note that if the buffer is smaller than the storage entry length, the returned +/// number of bytes is not equal to the number of bytes written to the `value_out`. +pub fn read_child_storage(storage_key: &[u8], key: &[u8], value_out: &mut [u8], value_offset: usize) -> Option { + ext::with(|ext| ext.child_storage(storage_key, key).map(|value| { + let value = &value[value_offset..]; + let written = ::std::cmp::min(value.len(), value_out.len()); + value_out[..written].copy_from_slice(&value[..written]); value.len() })).expect("read_storage cannot be called outside of an Externalities-provided environment.") } @@ -67,6 +86,13 @@ pub fn set_storage(key: &[u8], value: &[u8]) { ); } +/// Set the child storage of a key to some value. +pub fn set_child_storage(storage_key: &[u8], key: &[u8], value: &[u8]) { + ext::with(|ext| + ext.set_child_storage(storage_key.to_vec(), key.to_vec(), value.to_vec()) + ); +} + /// Clear the storage of a key. pub fn clear_storage(key: &[u8]) { ext::with(|ext| @@ -74,6 +100,13 @@ pub fn clear_storage(key: &[u8]) { ); } +/// Clear the storage of a key. 
+pub fn clear_child_storage(storage_key: &[u8], key: &[u8]) { + ext::with(|ext| + ext.clear_child_storage(storage_key, key) + ); +} + /// Check whether a given `key` exists in storage. pub fn exists_storage(key: &[u8]) -> bool { ext::with(|ext| @@ -81,6 +114,13 @@ pub fn exists_storage(key: &[u8]) -> bool { ).unwrap_or(false) } +/// Check whether a given `key` exists in storage. +pub fn exists_child_storage(storage_key: &[u8], key: &[u8]) -> bool { + ext::with(|ext| + ext.exists_child_storage(storage_key, key) + ).unwrap_or(false) +} + /// Clear the storage entries with a key that starts with the given prefix. pub fn clear_prefix(prefix: &[u8]) { ext::with(|ext| @@ -88,6 +128,13 @@ pub fn clear_prefix(prefix: &[u8]) { ); } +/// Clear an entire child storage. +pub fn kill_child_storage(storage_key: &[u8]) { + ext::with(|ext| + ext.kill_child_storage(storage_key) + ); +} + /// The current relay chain identifier. pub fn chain_id() -> u64 { ext::with(|ext| @@ -102,6 +149,13 @@ pub fn storage_root() -> H256 { ).unwrap_or(H256::new()) } +/// "Commit" all existing operations and compute the resultant child storage root. +pub fn child_storage_root(storage_key: &[u8]) -> Option> { + ext::with(|ext| + ext.child_storage_root(storage_key) + ).unwrap_or(None) +} + /// "Commit" all existing operations and get the resultant storage change root. pub fn storage_changes_root(block: u64) -> Option { ext::with(|ext| diff --git a/substrate/core/sr-io/without_std.rs b/substrate/core/sr-io/without_std.rs index db2a1d35d8..5b892ecffe 100644 --- a/substrate/core/sr-io/without_std.rs +++ b/substrate/core/sr-io/without_std.rs @@ -54,16 +54,24 @@ pub extern fn oom(_: ::core::alloc::Layout) -> ! 
{ } extern "C" { + fn ext_free(addr: *mut u8); fn ext_print_utf8(utf8_data: *const u8, utf8_len: u32); fn ext_print_hex(data: *const u8, len: u32); fn ext_print_num(value: u64); fn ext_set_storage(key_data: *const u8, key_len: u32, value_data: *const u8, value_len: u32); + fn ext_set_child_storage(storage_key_data: *const u8, storage_key_len: u32, key_data: *const u8, key_len: u32, value_data: *const u8, value_len: u32); fn ext_clear_storage(key_data: *const u8, key_len: u32); + fn ext_clear_child_storage(storage_key_data: *const u8, storage_key_len: u32, key_data: *const u8, key_len: u32); fn ext_exists_storage(key_data: *const u8, key_len: u32) -> u32; + fn ext_exists_child_storage(storage_key_data: *const u8, storage_key_len: u32, key_data: *const u8, key_len: u32) -> u32; fn ext_clear_prefix(prefix_data: *const u8, prefix_len: u32); + fn ext_kill_child_storage(storage_key_data: *const u8, storage_key_len: u32); fn ext_get_allocated_storage(key_data: *const u8, key_len: u32, written_out: *mut u32) -> *mut u8; + fn ext_get_allocated_child_storage(storage_key_data: *const u8, storage_key_len: u32, key_data: *const u8, key_len: u32, written_out: *mut u32) -> *mut u8; fn ext_get_storage_into(key_data: *const u8, key_len: u32, value_data: *mut u8, value_len: u32, value_offset: u32) -> u32; + fn ext_get_child_storage_into(storage_key_data: *const u8, storage_key_len: u32, key_data: *const u8, key_len: u32, value_data: *mut u8, value_len: u32, value_offset: u32) -> u32; fn ext_storage_root(result: *mut u8); + fn ext_child_storage_root(storage_key_data: *const u8, storage_key_len: u32, written_out: *mut u32) -> *mut u8; fn ext_storage_changes_root(block: u64, result: *mut u8) -> u32; fn ext_blake2_256_enumerated_trie_root(values_data: *const u8, lens_data: *const u32, lens_len: u32, result: *mut u8); fn ext_chain_id() -> u64; @@ -104,7 +112,24 @@ pub fn storage(key: &[u8]) -> Option> { if length == u32::max_value() { None } else { - Some(Vec::from_raw_parts(ptr, length 
as usize, length as usize)) + let ret = slice::from_raw_parts(ptr, length as usize).to_vec(); + ext_free(ptr); + Some(ret) + } + } +} + +/// Get `key` from child storage and return a `Vec`, empty if there's a problem. +pub fn child_storage(storage_key: &[u8], key: &[u8]) -> Option> { + let mut length: u32 = 0; + unsafe { + let ptr = ext_get_allocated_child_storage(storage_key.as_ptr(), storage_key.len() as u32, key.as_ptr(), key.len() as u32, &mut length); + if length == u32::max_value() { + None + } else { + let ret = slice::from_raw_parts(ptr, length as usize).to_vec(); + ext_free(ptr); + Some(ret) + } + } +} @@ -119,6 +144,17 @@ pub fn set_storage(key: &[u8], value: &[u8]) { } } +/// Set the child storage of some particular key to Some value. +pub fn set_child_storage(storage_key: &[u8], key: &[u8], value: &[u8]) { + unsafe { + ext_set_child_storage( + storage_key.as_ptr(), storage_key.len() as u32, + key.as_ptr(), key.len() as u32, + value.as_ptr(), value.len() as u32 + ); + } +} + /// Clear the storage of some particular key. pub fn clear_storage(key: &[u8]) { unsafe { @@ -128,6 +164,16 @@ pub fn clear_storage(key: &[u8]) { } } +/// Clear the child storage of some particular key. +pub fn clear_child_storage(storage_key: &[u8], key: &[u8]) { + unsafe { + ext_clear_child_storage( + storage_key.as_ptr(), storage_key.len() as u32, + key.as_ptr(), key.len() as u32 + ); + } +} + /// Determine whether a particular key exists in storage. pub fn exists_storage(key: &[u8]) -> bool { unsafe { @@ -137,6 +183,16 @@ pub fn exists_storage(key: &[u8]) -> bool { } } +/// Determine whether a particular key exists in child storage. +pub fn exists_child_storage(storage_key: &[u8], key: &[u8]) -> bool { + unsafe { + ext_exists_child_storage( + storage_key.as_ptr(), storage_key.len() as u32, + key.as_ptr(), key.len() as u32 + ) != 0 + } +} + /// Clear the storage entries key of which starts with the given prefix.
pub fn clear_prefix(prefix: &[u8]) { unsafe { @@ -147,6 +203,16 @@ pub fn clear_prefix(prefix: &[u8]) { } } +/// Clear an entire child storage. +pub fn kill_child_storage(storage_key: &[u8]) { + unsafe { + ext_kill_child_storage( + storage_key.as_ptr(), + storage_key.len() as u32 + ); + } +} + /// Get `key` from storage, placing the value into `value_out` (as much as possible) and return /// the number of bytes that the key in storage was beyond the offset. pub fn read_storage(key: &[u8], value_out: &mut [u8], value_offset: usize) -> Option { @@ -162,6 +228,22 @@ pub fn read_storage(key: &[u8], value_out: &mut [u8], value_offset: usize) -> Op } } +/// Get `key` from child storage, placing the value into `value_out` (as much as possible) and return +/// the number of bytes that the key in storage was beyond the offset. +pub fn read_child_storage(storage_key: &[u8], key: &[u8], value_out: &mut [u8], value_offset: usize) -> Option { + unsafe { + match ext_get_child_storage_into( + storage_key.as_ptr(), storage_key.len() as u32, + key.as_ptr(), key.len() as u32, + value_out.as_mut_ptr(), value_out.len() as u32, + value_offset as u32 + ) { + none if none == u32::max_value() => None, + length => Some(length as usize), + } + } +} + /// The current storage's root. pub fn storage_root() -> [u8; 32] { let mut result: [u8; 32] = Default::default(); @@ -171,6 +253,21 @@ pub fn storage_root() -> [u8; 32] { result } +/// "Commit" all existing operations and compute the resultant child storage root. +pub fn child_storage_root(storage_key: &[u8]) -> Option> { + let mut length: u32 = 0; + unsafe { + let ptr = ext_child_storage_root(storage_key.as_ptr(), storage_key.len() as u32, &mut length); + if length == u32::max_value() { + None + } else { + let ret = slice::from_raw_parts(ptr, length as usize).to_vec(); + ext_free(ptr); + Some(ret) + } + } +} + /// The current storage' changes root. 
pub fn storage_changes_root(block: u64) -> Option<[u8; 32]> { let mut result: [u8; 32] = Default::default(); diff --git a/substrate/core/sr-primitives/Cargo.toml b/substrate/core/sr-primitives/Cargo.toml index 0efb04f67b..df80d43a9f 100644 --- a/substrate/core/sr-primitives/Cargo.toml +++ b/substrate/core/sr-primitives/Cargo.toml @@ -13,7 +13,6 @@ parity-codec-derive = { version = "2.1", default-features = false } substrate-primitives = { path = "../primitives", default-features = false } sr-std = { path = "../sr-std", default-features = false } sr-io = { path = "../sr-io", default-features = false } -sr-version = { path = "../sr-version", default-features = false } log = {version = "0.4", optional = true } [dev-dependencies] @@ -28,7 +27,6 @@ std = [ "log", "sr-std/std", "sr-io/std", - "sr-version/std", "parity-codec/std", "substrate-primitives/std", ] diff --git a/substrate/core/sr-primitives/src/generic/block.rs b/substrate/core/sr-primitives/src/generic/block.rs index 4013cbc6a9..6cbb7929d8 100644 --- a/substrate/core/sr-primitives/src/generic/block.rs +++ b/substrate/core/sr-primitives/src/generic/block.rs @@ -72,7 +72,7 @@ pub struct Block { impl traits::Block for Block where Header: HeaderT, - Extrinsic: Member + Codec, + Extrinsic: Member + Codec + traits::Extrinsic, { type Extrinsic = Extrinsic; type Header = Header; @@ -102,4 +102,4 @@ pub struct SignedBlock { pub block: Block, /// Block justification. 
pub justification: Justification, -} \ No newline at end of file +} diff --git a/substrate/core/sr-primitives/src/generic/digest.rs b/substrate/core/sr-primitives/src/generic/digest.rs index 38d2262cd6..4d44ff5ce0 100644 --- a/substrate/core/sr-primitives/src/generic/digest.rs +++ b/substrate/core/sr-primitives/src/generic/digest.rs @@ -113,17 +113,11 @@ impl traits::DigestItem for D type AuthorityId = AuthorityId; fn as_authorities_change(&self) -> Option<&[Self::AuthorityId]> { - match *self { - DigestItem::AuthoritiesChange(ref authorities) => Some(authorities), - _ => None, - } + self.dref().as_authorities_change() } fn as_changes_trie_root(&self) -> Option<&Hash> { - match *self { - DigestItem::ChangesTrieRoot(ref changes_trie_root) => Some(changes_trie_root), - _ => None, - } + self.dref().as_changes_trie_root() } } @@ -150,6 +144,22 @@ impl Decode for DigestItem } } +impl<'a, Hash: Codec + Member, AuthorityId: Codec + Member> DigestItemRef<'a, Hash, AuthorityId> { + pub fn as_authorities_change(&self) -> Option<&'a [AuthorityId]> { + match *self { + DigestItemRef::AuthoritiesChange(ref authorities) => Some(authorities), + _ => None, + } + } + + pub fn as_changes_trie_root(&self) -> Option<&'a Hash> { + match *self { + DigestItemRef::ChangesTrieRoot(ref changes_trie_root) => Some(changes_trie_root), + _ => None, + } + } +} + impl<'a, Hash: Encode, AuthorityId: Encode> Encode for DigestItemRef<'a, Hash, AuthorityId> { fn encode(&self) -> Vec { let mut v = Vec::new(); diff --git a/substrate/core/sr-primitives/src/generic/unchecked_mortal_extrinsic.rs b/substrate/core/sr-primitives/src/generic/unchecked_mortal_extrinsic.rs index abaa9e7a93..5d91556d8c 100644 --- a/substrate/core/sr-primitives/src/generic/unchecked_mortal_extrinsic.rs +++ b/substrate/core/sr-primitives/src/generic/unchecked_mortal_extrinsic.rs @@ -22,7 +22,7 @@ use std::fmt; use rstd::prelude::*; use codec::{Decode, Encode, Input}; use traits::{self, Member, SimpleArithmetic, MaybeDisplay, 
CurrentHeight, BlockNumberToHash, Lookup, - Checkable}; + Checkable, Extrinsic}; use super::{CheckedExtrinsic, Era}; const TRANSACTION_VERSION: u8 = 1; @@ -56,10 +56,11 @@ impl UncheckedMortalExtrinsic bool { - self.signature.is_some() +impl Extrinsic for UncheckedMortalExtrinsic { + fn is_signed(&self) -> Option { + Some(self.signature.is_some()) } } @@ -221,49 +222,49 @@ mod tests { #[test] fn unsigned_check_should_work() { let ux = Ex::new_unsigned(DUMMY_FUNCTION); - assert!(!ux.is_signed()); + assert!(!ux.is_signed().unwrap_or(false)); assert!(>::check(ux, &TestContext).is_ok()); } #[test] fn badly_signed_check_should_fail() { let ux = Ex::new_signed(0, DUMMY_FUNCTION, DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, vec![0u8]), Era::immortal()); - assert!(ux.is_signed()); + assert!(ux.is_signed().unwrap_or(false)); assert_eq!(>::check(ux, &TestContext), Err("bad signature in extrinsic")); } #[test] fn immortal_signed_check_should_work() { let ux = Ex::new_signed(0, DUMMY_FUNCTION, DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, (DUMMY_ACCOUNTID, DUMMY_FUNCTION, Era::immortal(), 0u64).encode()), Era::immortal()); - assert!(ux.is_signed()); + assert!(ux.is_signed().unwrap_or(false)); assert_eq!(>::check(ux, &TestContext), Ok(CEx { signed: Some((DUMMY_ACCOUNTID, 0)), function: DUMMY_FUNCTION })); } #[test] fn mortal_signed_check_should_work() { let ux = Ex::new_signed(0, DUMMY_FUNCTION, DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, (DUMMY_ACCOUNTID, DUMMY_FUNCTION, Era::mortal(32, 42), 42u64).encode()), Era::mortal(32, 42)); - assert!(ux.is_signed()); + assert!(ux.is_signed().unwrap_or(false)); assert_eq!(>::check(ux, &TestContext), Ok(CEx { signed: Some((DUMMY_ACCOUNTID, 0)), function: DUMMY_FUNCTION })); } #[test] fn later_mortal_signed_check_should_work() { let ux = Ex::new_signed(0, DUMMY_FUNCTION, DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, (DUMMY_ACCOUNTID, DUMMY_FUNCTION, Era::mortal(32, 11), 11u64).encode()), Era::mortal(32, 11)); - assert!(ux.is_signed()); + 
assert!(ux.is_signed().unwrap_or(false)); assert_eq!(>::check(ux, &TestContext), Ok(CEx { signed: Some((DUMMY_ACCOUNTID, 0)), function: DUMMY_FUNCTION })); } #[test] fn too_late_mortal_signed_check_should_fail() { let ux = Ex::new_signed(0, DUMMY_FUNCTION, DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, (DUMMY_ACCOUNTID, DUMMY_FUNCTION, Era::mortal(32, 10), 10u64).encode()), Era::mortal(32, 10)); - assert!(ux.is_signed()); + assert!(ux.is_signed().unwrap_or(false)); assert_eq!(>::check(ux, &TestContext), Err("bad signature in extrinsic")); } #[test] fn too_early_mortal_signed_check_should_fail() { let ux = Ex::new_signed(0, DUMMY_FUNCTION, DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, (DUMMY_ACCOUNTID, DUMMY_FUNCTION, Era::mortal(32, 43), 43u64).encode()), Era::mortal(32, 43)); - assert!(ux.is_signed()); + assert!(ux.is_signed().unwrap_or(false)); assert_eq!(>::check(ux, &TestContext), Err("bad signature in extrinsic")); } diff --git a/substrate/core/sr-primitives/src/lib.rs b/substrate/core/sr-primitives/src/lib.rs index c403cf5846..15b86484a6 100644 --- a/substrate/core/sr-primitives/src/lib.rs +++ b/substrate/core/sr-primitives/src/lib.rs @@ -37,7 +37,6 @@ extern crate num_traits; extern crate integer_sqrt; extern crate sr_std as rstd; extern crate sr_io as runtime_io; -extern crate sr_version as runtime_version; #[doc(hidden)] pub extern crate parity_codec as codec; extern crate substrate_primitives; @@ -65,6 +64,12 @@ pub type Justification = Vec; use traits::{Verify, Lazy}; +/// A String that is a `&'static str` on `no_std` and a `String` on `std`. +#[cfg(not(feature = "std"))] +pub type RuntimeString = &'static str; +#[cfg(feature = "std")] +pub type RuntimeString = ::std::borrow::Cow<'static, str>; + #[cfg(feature = "std")] pub use serde::{Serialize, de::DeserializeOwned}; @@ -264,19 +269,32 @@ pub fn verify_encoded_lazy(sig: &V, item: &T, signe #[macro_export] macro_rules! 
__impl_outer_config_types { - ($concrete:ident $config:ident $snake:ident $($rest:ident)*) => { + ( + $concrete:ident $config:ident $snake:ident < $ignore:ident > $( $rest:tt )* + ) => { #[cfg(any(feature = "std", test))] pub type $config = $snake::GenesisConfig<$concrete>; __impl_outer_config_types! {$concrete $($rest)*} }; + ( + $concrete:ident $config:ident $snake:ident $( $rest:tt )* + ) => { + #[cfg(any(feature = "std", test))] + pub type $config = $snake::GenesisConfig; + __impl_outer_config_types! {$concrete $($rest)*} + }; ($concrete:ident) => () } #[macro_export] /// Implement the output "meta" module configuration struct. macro_rules! impl_outer_config { - ( pub struct $main:ident for $concrete:ident { $( $config:ident => $snake:ident, )* } ) => { - __impl_outer_config_types! { $concrete $( $config $snake )* } + ( + pub struct $main:ident for $concrete:ident { + $( $config:ident => $snake:ident $( < $generic:ident > )*, )* + } + ) => { + __impl_outer_config_types! { $concrete $( $config $snake $( < $generic > )* )* } #[cfg(any(feature = "std", test))] #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -321,7 +339,7 @@ macro_rules! impl_outer_log { ( $(#[$attr:meta])* pub enum $name:ident ($internal:ident: DigestItem<$( $genarg:ty ),*>) for $trait:ident { - $( $module:ident($( $item:ident ),*) ),* + $( $module:ident( $( $sitem:ident ),* ) ),* } ) => { /// Wrapper for all possible log entries for the `$trait` runtime. Provides binary-compatible @@ -338,7 +356,7 @@ macro_rules! impl_outer_log { #[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] $(#[$attr])* #[allow(non_camel_case_types)] - enum $internal { + pub enum InternalLog { $( $module($module::Log<$trait>), )* @@ -352,14 +370,27 @@ macro_rules! 
impl_outer_log { fn dref<'a>(&'a self) -> Option<$crate::generic::DigestItemRef<'a, $($genarg),*>> { match self.0 { $($( - $internal::$module($module::RawLog::$item(ref v)) => - Some($crate::generic::DigestItemRef::$item(v)), + $internal::$module($module::RawLog::$sitem(ref v)) => + Some($crate::generic::DigestItemRef::$sitem(v)), )*)* _ => None, } } } + impl $crate::traits::DigestItem for $name { + type Hash = <$crate::generic::DigestItem<$($genarg),*> as $crate::traits::DigestItem>::Hash; + type AuthorityId = <$crate::generic::DigestItem<$($genarg),*> as $crate::traits::DigestItem>::AuthorityId; + + fn as_authorities_change(&self) -> Option<&[Self::AuthorityId]> { + self.dref().and_then(|dref| dref.as_authorities_change()) + } + + fn as_changes_trie_root(&self) -> Option<&Self::Hash> { + self.dref().and_then(|dref| dref.as_changes_trie_root()) + } + } + impl From<$crate::generic::DigestItem<$($genarg),*>> for $name { /// Converts `generic::DigestItem` into `$name`. If `generic::DigestItem` represents /// a system item which is supported by the runtime, it is returned. @@ -370,8 +401,8 @@ macro_rules! impl_outer_log { fn from(gen: $crate::generic::DigestItem<$($genarg),*>) -> Self { match gen { $($( - $crate::generic::DigestItem::$item(value) => - $name($internal::$module($module::RawLog::$item(value))), + $crate::generic::DigestItem::$sitem(value) => + $name($internal::$module($module::RawLog::$sitem(value))), )*)* _ => gen.as_other() .and_then(|value| $crate::codec::Decode::decode(&mut &value[..])) @@ -412,10 +443,10 @@ macro_rules! impl_outer_log { } } - impl From<$module::Log<$trait>> for $internal { + impl From<$module::Log<$trait>> for InternalLog { /// Converts single module log item into `$internal`. fn from(x: $module::Log<$trait>) -> Self { - $internal::$module(x) + InternalLog::$module(x) } } )* @@ -426,6 +457,7 @@ macro_rules! 
impl_outer_log { mod tests { use substrate_primitives::hash::H256; use codec::{Encode as EncodeHidden, Decode as DecodeHidden}; + use traits::DigestItem; pub trait RuntimeT { type AuthorityId; @@ -437,31 +469,31 @@ mod tests { type AuthorityId = u64; } + mod a { + use super::RuntimeT; + pub type Log = RawLog<::AuthorityId>; + + #[derive(Serialize, Deserialize, Debug, Encode, Decode, PartialEq, Eq, Clone)] + pub enum RawLog { A1(AuthorityId), AuthoritiesChange(Vec), A3(AuthorityId) } + } + + mod b { + use super::RuntimeT; + pub type Log = RawLog<::AuthorityId>; + + #[derive(Serialize, Deserialize, Debug, Encode, Decode, PartialEq, Eq, Clone)] + pub enum RawLog { B1(AuthorityId), B2(AuthorityId) } + } + + // TODO try to avoid redundant brackets: a(AuthoritiesChange), b + impl_outer_log! { + pub enum Log(InternalLog: DigestItem) for Runtime { + a(AuthoritiesChange), b() + } + } + #[test] fn impl_outer_log_works() { - mod a { - use super::RuntimeT; - pub type Log = RawLog<::AuthorityId>; - - #[derive(Serialize, Deserialize, Debug, Encode, Decode, PartialEq, Eq, Clone)] - pub enum RawLog { A1(AuthorityId), AuthoritiesChange(Vec), A3(AuthorityId) } - } - - mod b { - use super::RuntimeT; - pub type Log = RawLog<::AuthorityId>; - - #[derive(Serialize, Deserialize, Debug, Encode, Decode, PartialEq, Eq, Clone)] - pub enum RawLog { B1(AuthorityId), B2(AuthorityId) } - } - - // TODO try to avoid redundant brackets: a(AuthoritiesChange), b - impl_outer_log! 
{ - pub enum Log(InternalLog: DigestItem) for Runtime { - a(AuthoritiesChange), b() - } - } - // encode/decode regular item let b1: Log = b::RawLog::B1::(777).into(); let encoded_b1 = b1.encode(); @@ -487,5 +519,11 @@ mod tests { super::generic::DigestItem::AuthoritiesChange::(authorities) => assert_eq!(authorities, vec![100, 200, 300]), _ => panic!("unexpected generic_auth_change: {:?}", generic_auth_change), } + + // check that as-style methods are working with system items + assert!(auth_change.as_authorities_change().is_some()); + + // check that as-style methods are not working with regular items + assert!(b1.as_authorities_change().is_none()); } } diff --git a/substrate/core/sr-primitives/src/testing.rs b/substrate/core/sr-primitives/src/testing.rs index fea8586f5a..ab57f1aa6c 100644 --- a/substrate/core/sr-primitives/src/testing.rs +++ b/substrate/core/sr-primitives/src/testing.rs @@ -17,7 +17,7 @@ //! Testing utilities. use serde::{Serialize, de::DeserializeOwned}; -use std::fmt::Debug; +use std::{fmt::Debug, ops::Deref}; use codec::Codec; use traits::{self, Checkable, Applyable, BlakeTwo256}; use generic::DigestItem as GenDigestItem; @@ -93,13 +93,36 @@ impl traits::Header for Header { } } +#[derive(PartialEq, Eq, Clone, Serialize, Deserialize, Debug, Encode, Decode)] +pub struct ExtrinsicWrapper(Xt); + +impl traits::Extrinsic for ExtrinsicWrapper { + fn is_signed(&self) -> Option { + None + } +} + +impl From for ExtrinsicWrapper { + fn from(xt: Xt) -> Self { + ExtrinsicWrapper(xt) + } +} + +impl Deref for ExtrinsicWrapper { + type Target = Xt; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + #[derive(PartialEq, Eq, Clone, Serialize, Deserialize, Debug, Encode, Decode)] pub struct Block { pub header: Header, pub extrinsics: Vec, } -impl traits::Block for Block { +impl traits::Block for Block { type Extrinsic = Xt; type Header = Header; type Hash =

::Hash; @@ -125,6 +148,11 @@ impl Checkable for Test type Checked = Self; fn check(self, _: &Context) -> Result { Ok(self) } } +impl traits::Extrinsic for TestXt { + fn is_signed(&self) -> Option { + None + } +} impl Applyable for TestXt where Call: 'static + Sized + Send + Sync + Clone + Eq + Codec + Debug + Serialize + DeserializeOwned, { diff --git a/substrate/core/sr-primitives/src/traits.rs b/substrate/core/sr-primitives/src/traits.rs index 072dffe3f3..82d28d013e 100644 --- a/substrate/core/sr-primitives/src/traits.rs +++ b/substrate/core/sr-primitives/src/traits.rs @@ -383,6 +383,17 @@ pub trait MaybeDisplay {} #[cfg(not(feature = "std"))] impl MaybeDisplay for T {} +#[cfg(feature = "std")] +pub trait MaybeDecode: ::codec::Decode {} +#[cfg(feature = "std")] +impl MaybeDecode for T {} + +#[cfg(not(feature = "std"))] +pub trait MaybeDecode {} +#[cfg(not(feature = "std"))] +impl MaybeDecode for T {} + + pub trait Member: Send + Sync + Sized + MaybeSerializeDebug + Eq + PartialEq + Clone + 'static {} impl Member for T {} @@ -430,7 +441,7 @@ pub trait Header: Clone + Send + Sync + Codec + Eq + MaybeSerializeDebug + 'stat /// /// You can get an iterator over each of the `extrinsics` and retrieve the `header`. pub trait Block: Clone + Send + Sync + Codec + Eq + MaybeSerializeDebug + 'static { - type Extrinsic: Member + Codec; + type Extrinsic: Member + Codec + Extrinsic; type Header: Header; type Hash: Member + ::rstd::hash::Hash + Copy + MaybeDisplay + Default + SimpleBitOps + Codec + AsRef<[u8]> + AsMut<[u8]>; @@ -527,12 +538,37 @@ pub trait DigestItem: Codec + Member { type AuthorityId: Member; /// Returns Some if the entry is the `AuthoritiesChange` entry. - fn as_authorities_change(&self) -> Option<&[Self::AuthorityId]> { - None - } + fn as_authorities_change(&self) -> Option<&[Self::AuthorityId]>; /// Returns Some if the entry is the `ChangesTrieRoot` entry. 
- fn as_changes_trie_root(&self) -> Option<&Self::Hash> { - None - } + fn as_changes_trie_root(&self) -> Option<&Self::Hash>; +} + +/// Something that provides an inherent for a runtime. +pub trait ProvideInherent { + /// The inherent that is provided. + type Inherent: Encode + MaybeDecode; + /// The error used by this trait. + type Error: Encode + MaybeDecode; + /// The call for setting the inherent. + type Call: Encode + MaybeDecode; + + /// Create the inherent extrinsics. + /// + /// # Return + /// + /// Returns a vector with tuples containing the index for the extrinsic and the extrinsic itself. + fn create_inherent_extrinsics(data: Self::Inherent) -> Vec<(u32, Self::Call)>; + + /// Check that the given inherent is valid. + fn check_inherent Option<&Self::Call>>( + block: &Block, data: Self::Inherent, extract_function: &F + ) -> Result<(), Self::Error>; +} + +/// Something that acts like an `Extrinsic`. +pub trait Extrinsic { + /// Is this `Extrinsic` signed? + /// If no information are available about signed/unsigned, `None` should be returned. 
+ fn is_signed(&self) -> Option { None } } diff --git a/substrate/core/sr-primitives/src/transaction_validity.rs b/substrate/core/sr-primitives/src/transaction_validity.rs index fc125e24a8..3e185fea99 100644 --- a/substrate/core/sr-primitives/src/transaction_validity.rs +++ b/substrate/core/sr-primitives/src/transaction_validity.rs @@ -33,11 +33,11 @@ pub type TransactionTag = Vec; #[cfg_attr(feature = "std", derive(Debug))] pub enum TransactionValidity { Invalid, - Valid( - /* priority: */TransactionPriority, - /* requires: */Vec, - /* provides: */Vec, - /* longevity: */TransactionLongevity - ), + Valid { + priority: TransactionPriority, + requires: Vec, + provides: Vec, + longevity: TransactionLongevity + }, Unknown, } diff --git a/substrate/core/sr-std/without_std.rs b/substrate/core/sr-std/without_std.rs index 4b5c6c7705..d5065648e9 100644 --- a/substrate/core/sr-std/without_std.rs +++ b/substrate/core/sr-std/without_std.rs @@ -15,7 +15,8 @@ // along with Substrate. If not, see . #[cfg(feature = "nightly")] -extern crate alloc; +#[doc(hidden)] +pub extern crate alloc; extern "C" { fn ext_malloc(size: usize) -> *mut u8; diff --git a/substrate/core/sr-version/Cargo.toml b/substrate/core/sr-version/Cargo.toml index 9f3cfe7f64..785eecdfa7 100644 --- a/substrate/core/sr-version/Cargo.toml +++ b/substrate/core/sr-version/Cargo.toml @@ -9,6 +9,7 @@ serde_derive = { version = "1.0", optional = true } parity-codec = { version = "2.1", default-features = false } parity-codec-derive = { version = "2.1", default-features = false } sr-std = { path = "../sr-std", default-features = false } +sr-primitives = { path = "../sr-primitives", default-features = false } [features] default = ["std"] @@ -17,4 +18,5 @@ std = [ "serde_derive", "parity-codec/std", "sr-std/std", + "sr-primitives/std", ] diff --git a/substrate/core/sr-version/src/lib.rs b/substrate/core/sr-version/src/lib.rs index f13eeef763..8af084da5e 100644 --- a/substrate/core/sr-version/src/lib.rs +++ 
b/substrate/core/sr-version/src/lib.rs @@ -29,15 +29,14 @@ extern crate sr_std as rstd; #[macro_use] extern crate parity_codec_derive; +extern crate sr_primitives as runtime_primitives; + #[cfg(feature = "std")] use std::fmt; #[cfg(feature = "std")] use std::collections::HashSet; -#[cfg(feature = "std")] -pub type VersionString = ::std::borrow::Cow<'static, str>; -#[cfg(not(feature = "std"))] -pub type VersionString = &'static str; +use runtime_primitives::RuntimeString; /// The identity of a particular API interface that the runtime might provide. pub type ApiId = [u8; 8]; @@ -80,14 +79,14 @@ pub struct RuntimeVersion { /// Identifies the different Substrate runtimes. There'll be at least polkadot and node. /// A different on-chain spec_name to that of the native runtime would normally result /// in node not attempting to sync or author blocks. - pub spec_name: VersionString, + pub spec_name: RuntimeString, /// Name of the implementation of the spec. This is of little consequence for the node /// and serves only to differentiate code of different implementation teams. For this /// codebase, it will be parity-polkadot. If there were a non-Rust implementation of the /// Polkadot runtime (e.g. C++), then it would identify itself with an accordingly different /// `impl_name`. - pub impl_name: VersionString, + pub impl_name: RuntimeString, /// `authoring_version` is the version of the authorship interface. An authoring node /// will not attempt to author blocks unless this is equal to its native runtime. 
diff --git a/substrate/core/state-machine/src/backend.rs b/substrate/core/state-machine/src/backend.rs index 412fe53628..1df07a887d 100644 --- a/substrate/core/state-machine/src/backend.rs +++ b/substrate/core/state-machine/src/backend.rs @@ -23,7 +23,7 @@ use std::marker::PhantomData; use hash_db::Hasher; use trie_backend::TrieBackend; use trie_backend_essence::TrieBackendStorage; -use substrate_trie::{TrieDBMut, TrieMut, MemoryDB, trie_root}; +use substrate_trie::{TrieDBMut, TrieMut, MemoryDB, trie_root, child_trie_root}; use heapsize::HeapSizeOf; /// A state backend is used to read state data and can have changes committed @@ -35,7 +35,7 @@ pub trait Backend { type Error: super::Error; /// Storage changes to be applied if committing - type Transaction; + type Transaction: Consolidate + Default; /// Type of trie backend storage. type TrieBackendStorage: TrieBackendStorage; @@ -43,11 +43,22 @@ pub trait Backend { /// Get keyed storage associated with specific address, or None if there is nothing associated. fn storage(&self, key: &[u8]) -> Result>, Self::Error>; + /// Get keyed child storage associated with specific address, or None if there is nothing associated. + fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result>, Self::Error>; + /// true if a key exists in storage. fn exists_storage(&self, key: &[u8]) -> Result { Ok(self.storage(key)?.is_some()) } + /// true if a key exists in child storage. + fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result { + Ok(self.child_storage(storage_key, key)?.is_some()) + } + + /// Retrieve all entries keys of child storage and call `f` for each of those keys. + fn for_keys_in_child_storage(&self, storage_key: &[u8], f: F); + /// Retrieve all entries keys of which start with the given prefix and /// call `f` for each of those keys. 
fn for_keys_with_prefix(&self, prefix: &[u8], f: F); @@ -59,6 +70,13 @@ pub trait Backend { I: IntoIterator, Option>)>, H::Out: Ord; + /// Calculate the child storage root, with given delta over what is already stored in + /// the backend, and produce a "transaction" that can be used to commit. + fn child_storage_root(&self, storage_key: &[u8], delta: I) -> (Vec, Self::Transaction) + where + I: IntoIterator, Option>)>, + H::Out: Ord; + /// Get all key/value pairs into a Vec. fn pairs(&self) -> Vec<(Vec, Vec)>; @@ -66,6 +84,30 @@ pub trait Backend { fn try_into_trie_backend(self) -> Option>; } +/// Trait that allows consolidate two transactions together. +pub trait Consolidate { + /// Consolidate two transactions into one. + fn consolidate(&mut self, other: Self); +} + +impl Consolidate for () { + fn consolidate(&mut self, _: Self) { + () + } +} + +impl Consolidate for Vec<(Option>, Vec, Option>)> { + fn consolidate(&mut self, mut other: Self) { + self.append(&mut other); + } +} + +impl Consolidate for MemoryDB { + fn consolidate(&mut self, other: Self) { + MemoryDB::consolidate(self, other) + } +} + /// Error impossible. // TODO: use `!` type when stabilized. #[derive(Debug)] @@ -85,7 +127,7 @@ impl error::Error for Void { /// tests. 
#[derive(Eq)] pub struct InMemory { - inner: HashMap, Vec>, + inner: HashMap>, HashMap, Vec>>, _hasher: PhantomData, } @@ -117,10 +159,10 @@ impl InMemory where H::Out: HeapSizeOf { /// Copy the state, with applied updates pub fn update(&self, changes: >::Transaction) -> Self { let mut inner: HashMap<_, _> = self.inner.clone(); - for (key, val) in changes { + for (storage_key, key, val) in changes { match val { - Some(v) => { inner.insert(key, v); }, - None => { inner.remove(&key); }, + Some(v) => { inner.entry(storage_key).or_default().insert(key, v); }, + None => { inner.entry(storage_key).or_default().remove(&key); }, } } @@ -128,8 +170,8 @@ impl InMemory where H::Out: HeapSizeOf { } } -impl From, Vec>> for InMemory { - fn from(inner: HashMap, Vec>) -> Self { +impl From>, HashMap, Vec>>> for InMemory { + fn from(inner: HashMap>, HashMap, Vec>>) -> Self { InMemory { inner: inner, _hasher: PhantomData, @@ -137,23 +179,42 @@ impl From, Vec>> for InMemory { } } +impl From, Vec>> for InMemory { + fn from(inner: HashMap, Vec>) -> Self { + let mut expanded = HashMap::new(); + expanded.insert(None, inner); + InMemory { + inner: expanded, + _hasher: PhantomData, + } + } +} + impl super::Error for Void {} impl Backend for InMemory where H::Out: HeapSizeOf { type Error = Void; - type Transaction = Vec<(Vec, Option>)>; + type Transaction = Vec<(Option>, Vec, Option>)>; type TrieBackendStorage = MemoryDB; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { - Ok(self.inner.get(key).map(Clone::clone)) + Ok(self.inner.get(&None).and_then(|map| map.get(key).map(Clone::clone))) + } + + fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result>, Self::Error> { + Ok(self.inner.get(&Some(storage_key.to_vec())).and_then(|map| map.get(key).map(Clone::clone))) } fn exists_storage(&self, key: &[u8]) -> Result { - Ok(self.inner.get(key).is_some()) + Ok(self.inner.get(&None).map(|map| map.get(key).is_some()).unwrap_or(false)) } fn for_keys_with_prefix(&self, prefix: &[u8], f: 
F) { - self.inner.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f); + self.inner.get(&None).map(|map| map.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f)); + } + + fn for_keys_in_child_storage(&self, storage_key: &[u8], mut f: F) { + self.inner.get(&Some(storage_key.to_vec())).map(|map| map.keys().for_each(|k| f(&k))); } fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) @@ -161,7 +222,7 @@ impl Backend for InMemory where H::Out: HeapSizeOf { I: IntoIterator, Option>)>, ::Out: Ord, { - let existing_pairs = self.inner.iter().map(|(k, v)| (k.clone(), Some(v.clone()))); + let existing_pairs = self.inner.get(&None).into_iter().flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); let transaction: Vec<_> = delta.into_iter().collect(); let root = trie_root::(existing_pairs.chain(transaction.iter().cloned()) @@ -170,16 +231,52 @@ impl Backend for InMemory where H::Out: HeapSizeOf { .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) ); - (root, transaction) + let full_transaction = transaction.into_iter().map(|(k, v)| (None, k, v)).collect(); + + (root, full_transaction) + } + + fn child_storage_root(&self, storage_key: &[u8], delta: I) -> (Vec, Self::Transaction) + where + I: IntoIterator, Option>)>, + H::Out: Ord + { + let storage_key = storage_key.to_vec(); + + let existing_pairs = self.inner.get(&Some(storage_key.clone())).into_iter().flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); + + let transaction: Vec<_> = delta.into_iter().collect(); + let root = child_trie_root::( + &storage_key, + existing_pairs.chain(transaction.iter().cloned()) + .collect::>() + .into_iter() + .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) + ); + + let full_transaction = transaction.into_iter().map(|(k, v)| (Some(storage_key.clone()), k, v)).collect(); + + (root, full_transaction) } fn pairs(&self) -> Vec<(Vec, Vec)> { - self.inner.iter().map(|(k, v)| (k.clone(), 
v.clone())).collect() + self.inner.get(&None).into_iter().flat_map(|map| map.iter().map(|(k, v)| (k.clone(), v.clone()))).collect() } fn try_into_trie_backend(self) -> Option> { let mut mdb = MemoryDB::default(); // TODO: should be more correct and use ::new() - let root = insert_into_memory_db::(&mut mdb, self.inner.into_iter())?; + let mut root = None; + for (storage_key, map) in self.inner { + if storage_key != None { + let _ = insert_into_memory_db::(&mut mdb, map.into_iter())?; + } else { + root = Some(insert_into_memory_db::(&mut mdb, map.into_iter())?); + } + } + let root = match root { + Some(root) => root, + None => insert_into_memory_db::(&mut mdb, ::std::iter::empty())?, + }; Some(TrieBackend::new(mdb, root)) } } diff --git a/substrate/core/state-machine/src/changes_trie/build.rs b/substrate/core/state-machine/src/changes_trie/build.rs index 8c317d760e..991b395938 100644 --- a/substrate/core/state-machine/src/changes_trie/build.rs +++ b/substrate/core/state-machine/src/changes_trie/build.rs @@ -73,10 +73,10 @@ fn prepare_extrinsics_input( where B: Backend, H: Hasher, - + { let mut extrinsic_map = BTreeMap::, BTreeSet>::new(); - for (key, val) in changes.prospective.iter().chain(changes.committed.iter()) { + for (key, val) in changes.prospective.top.iter().chain(changes.committed.top.iter()) { let extrinsics = match val.extrinsics { Some(ref extrinsics) => extrinsics, None => continue, @@ -274,7 +274,7 @@ mod test { let (backend, storage, mut changes) = prepare_for_build(); // 110: missing from backend, set to None in overlay - changes.prospective.insert(vec![110], OverlayedValue { + changes.prospective.top.insert(vec![110], OverlayedValue { value: None, extrinsics: Some(vec![1].into_iter().collect()) }); diff --git a/substrate/core/state-machine/src/changes_trie/mod.rs b/substrate/core/state-machine/src/changes_trie/mod.rs index 80f3bd5598..fb16cb54d6 100644 --- a/substrate/core/state-machine/src/changes_trie/mod.rs +++ 
b/substrate/core/state-machine/src/changes_trie/mod.rs @@ -31,6 +31,9 @@ //! block }, containing entries for every storage key that has been changed in //! the last N*digest_level-1 blocks (except for genesis block), mapping these keys //! to the set of lower-level digest blocks. +//! +//! Changes trie only contains the top level storage changes. Sub-level changes +//! are propagated through its storage root on the top level storage. mod build; mod build_iterator; diff --git a/substrate/core/state-machine/src/ext.rs b/substrate/core/state-machine/src/ext.rs index b0fc7347d3..b052942e49 100644 --- a/substrate/core/state-machine/src/ext.rs +++ b/substrate/core/state-machine/src/ext.rs @@ -17,11 +17,12 @@ //! Conrete externalities implementation. use std::{error, fmt, cmp::Ord}; -use backend::Backend; +use backend::{Backend, Consolidate}; use changes_trie::{Storage as ChangesTrieStorage, compute_changes_trie_root}; use {Externalities, OverlayedChanges}; use hash_db::Hasher; -use substrate_trie::{MemoryDB, TrieDBMut, TrieMut}; +use primitives::storage::well_known_keys::is_child_storage_key; +use substrate_trie::{MemoryDB, TrieDBMut, TrieMut, default_child_trie_root, is_child_trie_key_valid}; use heapsize::HeapSizeOf; const EXT_NOT_ALLOWED_TO_FAIL: &'static str = "Externalities not allowed to fail within runtime"; @@ -122,6 +123,31 @@ where fn mark_dirty(&mut self) { self.storage_transaction = None; } + + /// Fetch child storage root together with its transaction. 
+ fn child_storage_root_transaction(&mut self, storage_key: &[u8]) -> (Vec, B::Transaction) { + self.mark_dirty(); + + let (root, transaction) = { + let delta = self.overlay.committed.children.get(storage_key) + .into_iter() + .flat_map(|map| map.1.iter().map(|(k, v)| (k.clone(), v.clone()))) + .chain(self.overlay.prospective.children.get(storage_key) + .into_iter() + .flat_map(|map| map.1.iter().map(|(k, v)| (k.clone(), v.clone())))); + + self.backend.child_storage_root(storage_key, delta) + }; + + let root_val = if root == default_child_trie_root::(storage_key) { + None + } else { + Some(root.clone()) + }; + self.overlay.sync_child_storage_root(storage_key, root_val); + + (root, transaction) + } } #[cfg(test)] @@ -137,8 +163,8 @@ where self.backend.pairs().iter() .map(|&(ref k, ref v)| (k.to_vec(), Some(v.to_vec()))) - .chain(self.overlay.committed.clone().into_iter().map(|(k, v)| (k, v.value))) - .chain(self.overlay.prospective.clone().into_iter().map(|(k, v)| (k, v.value))) + .chain(self.overlay.committed.top.clone().into_iter().map(|(k, v)| (k, v.value))) + .chain(self.overlay.prospective.top.clone().into_iter().map(|(k, v)| (k, v.value))) .collect::>() .into_iter() .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) @@ -158,6 +184,11 @@ where self.backend.storage(key).expect(EXT_NOT_ALLOWED_TO_FAIL)) } + fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option> { + self.overlay.child_storage(storage_key, key).map(|x| x.map(|x| x.to_vec())).unwrap_or_else(|| + self.backend.child_storage(storage_key, key).expect(EXT_NOT_ALLOWED_TO_FAIL)) + } + fn exists_storage(&self, key: &[u8]) -> bool { match self.overlay.storage(key) { Some(x) => x.is_some(), @@ -165,12 +196,52 @@ where } } + fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> bool { + match self.overlay.child_storage(storage_key, key) { + Some(x) => x.is_some(), + _ => self.backend.exists_child_storage(storage_key, key).expect(EXT_NOT_ALLOWED_TO_FAIL), + } + } + fn 
place_storage(&mut self, key: Vec, value: Option>) { + if is_child_storage_key(&key) { + warn!(target: "trie", "Refuse to directly set child storage key"); + return; + } + self.mark_dirty(); self.overlay.set_storage(key, value); } + fn place_child_storage(&mut self, storage_key: Vec, key: Vec, value: Option>) -> bool { + if !is_child_storage_key(&storage_key) || !is_child_trie_key_valid::(&storage_key) { + return false; + } + + self.mark_dirty(); + self.overlay.set_child_storage(storage_key, key, value); + + true + } + + fn kill_child_storage(&mut self, storage_key: &[u8]) { + if !is_child_storage_key(storage_key) || !is_child_trie_key_valid::(storage_key) { + return; + } + + self.mark_dirty(); + self.overlay.clear_child_storage(storage_key); + self.backend.for_keys_in_child_storage(storage_key, |key| { + self.overlay.set_child_storage(storage_key.to_vec(), key.to_vec(), None); + }); + } + fn clear_prefix(&mut self, prefix: &[u8]) { + if is_child_storage_key(prefix) { + warn!(target: "trie", "Refuse to directly clear prefix that is part of child storage key"); + return; + } + self.mark_dirty(); self.overlay.clear_prefix(prefix); self.backend.for_keys_with_prefix(prefix, |key| { @@ -183,19 +254,40 @@ where } fn storage_root(&mut self) -> H::Out { - if let Some((_, ref root)) = self.storage_transaction { + if let Some((_, ref root)) = self.storage_transaction { return root.clone(); } - // compute and memoize - let delta = self.overlay.committed.iter().map(|(k, v)| (k.clone(), v.value.clone())) - .chain(self.overlay.prospective.iter().map(|(k, v)| (k.clone(), v.value.clone()))); + let mut transaction = B::Transaction::default(); + let child_storage_keys: Vec<_> = self.overlay.prospective.children.keys().cloned().collect(); - let (root, transaction) = self.backend.storage_root(delta); + for key in child_storage_keys { + let (_, t) = self.child_storage_root_transaction(&key); + transaction.consolidate(t); + } + + // compute and memoize + let delta = 
self.overlay.committed.top.iter().map(|(k, v)| (k.clone(), v.value.clone())) + .chain(self.overlay.prospective.top.iter().map(|(k, v)| (k.clone(), v.value.clone()))); + + let (root, t) = self.backend.storage_root(delta); + transaction.consolidate(t); self.storage_transaction = Some((transaction, root)); root } + fn child_storage_root(&mut self, storage_key: &[u8]) -> Option> { + if !is_child_storage_key(storage_key) || !is_child_trie_key_valid::(storage_key) { + return None; + } + + if self.storage_transaction.is_some() { + return Some(self.storage(storage_key).unwrap_or(default_child_trie_root::(storage_key))); + } + + Some(self.child_storage_root_transaction(storage_key).0) + } + fn storage_changes_root(&mut self, block: u64) -> Option { let root_and_tx = compute_changes_trie_root::<_, T, H>( self.backend, @@ -287,7 +379,7 @@ mod tests { #[test] fn storage_changes_root_is_some_when_extrinsic_changes_are_empty() { let mut overlay = prepare_overlay_with_changes(); - overlay.prospective.get_mut(&vec![1]).unwrap().value = None; + overlay.prospective.top.get_mut(&vec![1]).unwrap().value = None; let storage = TestChangesTrieStorage::new(); let backend = TestBackend::default(); let mut ext = TestExt::new(&mut overlay, &backend, Some(&storage)); diff --git a/substrate/core/state-machine/src/lib.rs b/substrate/core/state-machine/src/lib.rs index 50a8a83e6f..4853b2afb5 100644 --- a/substrate/core/state-machine/src/lib.rs +++ b/substrate/core/state-machine/src/lib.rs @@ -97,33 +97,62 @@ pub trait Externalities { /// Read storage of current contract being called. fn storage(&self, key: &[u8]) -> Option>; + /// Read child storage of current contract being called. + fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option>; + /// Set storage entry `key` of current contract being called (effective immediately). 
fn set_storage(&mut self, key: Vec, value: Vec) { self.place_storage(key, Some(value)); } + /// Set child storage entry `key` of current contract being called (effective immediately). + fn set_child_storage(&mut self, storage_key: Vec, key: Vec, value: Vec) -> bool { + self.place_child_storage(storage_key, key, Some(value)) + } + /// Clear a storage entry (`key`) of current contract being called (effective immediately). fn clear_storage(&mut self, key: &[u8]) { self.place_storage(key.to_vec(), None); } - /// Clear a storage entry (`key`) of current contract being called (effective immediately). + /// Clear a child storage entry (`key`) of current contract being called (effective immediately). + fn clear_child_storage(&mut self, storage_key: &[u8], key: &[u8]) -> bool { + self.place_child_storage(storage_key.to_vec(), key.to_vec(), None) + } + + /// Whether a storage entry exists. fn exists_storage(&self, key: &[u8]) -> bool { self.storage(key).is_some() } + /// Whether a child storage entry exists. + fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> bool { + self.child_storage(storage_key, key).is_some() + } + + /// Clear an entire child storage. + fn kill_child_storage(&mut self, storage_key: &[u8]); + /// Clear storage entries which keys are start with the given prefix. fn clear_prefix(&mut self, prefix: &[u8]); /// Set or clear a storage entry (`key`) of current contract being called (effective immediately). fn place_storage(&mut self, key: Vec, value: Option>); + /// Set or clear a child storage entry. Return whether the operation succeeds. + fn place_child_storage(&mut self, storage_key: Vec, key: Vec, value: Option>) -> bool; + /// Get the identity of the chain. fn chain_id(&self) -> u64; - /// Get the trie root of the current storage map. + /// Get the trie root of the current storage map. This will also update all child storage keys in the top-level storage map. 
fn storage_root(&mut self) -> H::Out where H::Out: Ord; + /// Get the trie root of a child storage map. This will also update the value of the child storage keys in the top-level storage map. If the storage root equals default hash as defined by trie, the key in top-level storage map will be removed. + /// + /// Returns None if key provided is not a storage key. This can be due to not being started with CHILD_STORAGE_KEY_PREFIX, or the trie implementation regards the key as invalid. + fn child_storage_root(&mut self, storage_key: &[u8]) -> Option>; + /// Get the change trie root of the current storage overlay at given block. fn storage_changes_root(&mut self, block: u64) -> Option where H::Out: Ord; } @@ -477,6 +506,7 @@ where mod tests { use std::collections::HashMap; use codec::Encode; + use overlayed_changes::OverlayedValue; use super::*; use super::backend::InMemory; use super::ext::Ext; @@ -600,12 +630,12 @@ mod tests { let backend = InMemory::::from(initial).try_into_trie_backend().unwrap(); let mut overlay = OverlayedChanges { committed: map![ - b"aba".to_vec() => Some(b"1312".to_vec()).into(), - b"bab".to_vec() => Some(b"228".to_vec()).into() + b"aba".to_vec() => OverlayedValue::from(Some(b"1312".to_vec())), + b"bab".to_vec() => OverlayedValue::from(Some(b"228".to_vec())) ], prospective: map![ - b"abd".to_vec() => Some(b"69".to_vec()).into(), - b"bbd".to_vec() => Some(b"42".to_vec()).into() + b"abd".to_vec() => OverlayedValue::from(Some(b"69".to_vec())), + b"bbd".to_vec() => OverlayedValue::from(Some(b"42".to_vec())) ], ..Default::default() }; @@ -631,6 +661,19 @@ mod tests { ); } + #[test] + fn set_child_storage_works() { + let backend = InMemory::::default().try_into_trie_backend().unwrap(); + let changes_trie_storage = InMemoryChangesTrieStorage::new(); + let mut overlay = OverlayedChanges::default(); + let mut ext = Ext::new(&mut overlay, &backend, Some(&changes_trie_storage)); + + assert!(ext.set_child_storage(b":child_storage:testchild".to_vec(), 
b"abc".to_vec(), b"def".to_vec())); + assert_eq!(ext.child_storage(b":child_storage:testchild", b"abc"), Some(b"def".to_vec())); + ext.kill_child_storage(b":child_storage:testchild"); + assert_eq!(ext.child_storage(b":child_storage:testchild", b"abc"), None); + } + #[test] fn prove_read_and_proof_check_works() { // fetch read proof from 'remote' full node diff --git a/substrate/core/state-machine/src/overlayed_changes.rs b/substrate/core/state-machine/src/overlayed_changes.rs index e3e8d18b36..15012ac66e 100644 --- a/substrate/core/state-machine/src/overlayed_changes.rs +++ b/substrate/core/state-machine/src/overlayed_changes.rs @@ -16,6 +16,7 @@ //! The overlayed changes to state. +#[cfg(test)] use std::iter::FromIterator; use std::collections::{HashMap, HashSet}; use codec::Decode; use changes_trie::{NO_EXTRINSIC_INDEX, Configuration as ChangesTrieConfig}; @@ -28,9 +29,9 @@ use primitives::storage::well_known_keys::EXTRINSIC_INDEX; #[derive(Debug, Default, Clone)] pub struct OverlayedChanges { /// Changes that are not yet committed. - pub(crate) prospective: HashMap, OverlayedValue>, + pub(crate) prospective: OverlayedChangeSet, /// Committed changes. - pub(crate) committed: HashMap, OverlayedValue>, + pub(crate) committed: OverlayedChangeSet, /// Changes trie configuration. None by default, but could be installed by the /// runtime if it supports change tries. pub(crate) changes_trie_config: Option, @@ -47,6 +48,39 @@ pub struct OverlayedValue { pub extrinsics: Option>, } +/// Prospective or committed overlayed change set. +#[derive(Debug, Default, Clone)] +#[cfg_attr(test, derive(PartialEq))] +pub struct OverlayedChangeSet { + /// Top level storage changes. + pub top: HashMap, OverlayedValue>, + /// Child storage changes. 
+ pub children: HashMap, (Option>, HashMap, Option>>)>, +} + +#[cfg(test)] +impl FromIterator<(Vec, OverlayedValue)> for OverlayedChangeSet { + fn from_iter, OverlayedValue)>>(iter: T) -> Self { + Self { + top: iter.into_iter().collect(), + children: Default::default(), + } + } +} + +impl OverlayedChangeSet { + /// Whether the change set is empty. + pub fn is_empty(&self) -> bool { + self.top.is_empty() && self.children.is_empty() + } + + /// Clear the change set. + pub fn clear(&mut self) { + self.top.clear(); + self.children.clear(); + } +} + impl OverlayedChanges { /// Sets the changes trie configuration. /// @@ -68,17 +102,36 @@ impl OverlayedChanges { /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose /// value has been set. pub fn storage(&self, key: &[u8]) -> Option> { - self.prospective.get(key) - .or_else(|| self.committed.get(key)) + self.prospective.top.get(key) + .or_else(|| self.committed.top.get(key)) .map(|x| x.value.as_ref().map(AsRef::as_ref)) } + /// Returns a double-Option: None if the key is unknown (i.e. and the query should be referred + /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose + /// value has been set. + pub fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option> { + if let Some(map) = self.prospective.children.get(storage_key) { + if let Some(val) = map.1.get(key) { + return Some(val.as_ref().map(AsRef::as_ref)); + } + } + + if let Some(map) = self.committed.children.get(storage_key) { + if let Some(val) = map.1.get(key) { + return Some(val.as_ref().map(AsRef::as_ref)); + } + } + + None + } + /// Inserts the given key-value pair into the prospective change set. /// /// `None` can be used to delete a value specified by the given key. 
pub(crate) fn set_storage(&mut self, key: Vec, val: Option>) { let extrinsic_index = self.extrinsic_index(); - let entry = self.prospective.entry(key).or_default(); + let entry = self.prospective.top.entry(key).or_default(); entry.value = val; if let Some(extrinsic) = extrinsic_index { @@ -87,6 +140,57 @@ impl OverlayedChanges { } } + /// Inserts the given key-value pair into the prospective child change set. + /// + /// `None` can be used to delete a value specified by the given key. + pub(crate) fn set_child_storage(&mut self, storage_key: Vec, key: Vec, val: Option>) { + let extrinsic_index = self.extrinsic_index(); + let map_entry = self.prospective.children.entry(storage_key).or_default(); + map_entry.1.insert(key, val); + + if let Some(extrinsic) = extrinsic_index { + map_entry.0.get_or_insert_with(Default::default) + .insert(extrinsic); + } + } + + /// Sync the child storage root. + pub(crate) fn sync_child_storage_root(&mut self, storage_key: &[u8], root: Option>) { + let entry = self.prospective.top.entry(storage_key.to_vec()).or_default(); + entry.value = root; + + if let Some((Some(extrinsics), _)) = self.prospective.children.get(storage_key) { + for extrinsic in extrinsics { + entry.extrinsics.get_or_insert_with(Default::default) + .insert(*extrinsic); + } + } + } + + /// Clear child storage of given storage key. + /// + /// NOTE that this doesn't take place immediately but written into the prospective + /// change set, and still can be reverted by [`discard_prospective`]. 
+ /// + /// [`discard_prospective`]: #method.discard_prospective + pub(crate) fn clear_child_storage(&mut self, storage_key: &[u8]) { + let extrinsic_index = self.extrinsic_index(); + let map_entry = self.prospective.children.entry(storage_key.to_vec()).or_default(); + + if let Some(extrinsic) = extrinsic_index { + map_entry.0.get_or_insert_with(Default::default) + .insert(extrinsic); + } + + map_entry.1.values_mut().for_each(|e| *e = None); + + if let Some((_, committed_map)) = self.committed.children.get(storage_key) { + for (key, _) in committed_map.iter() { + map_entry.1.insert(key.clone(), None); + } + } + } + /// Removes all key-value pairs which keys share the given prefix. /// /// NOTE that this doesn't take place immediately but written into the prospective @@ -98,7 +202,7 @@ impl OverlayedChanges { // Iterate over all prospective and mark all keys that share // the given prefix as removed (None). - for (key, entry) in self.prospective.iter_mut() { + for (key, entry) in self.prospective.top.iter_mut() { if key.starts_with(prefix) { entry.value = None; @@ -111,9 +215,9 @@ impl OverlayedChanges { // Then do the same with keys from commited changes. // NOTE that we are making changes in the prospective change set. 
- for key in self.committed.keys() { + for key in self.committed.top.keys() { if key.starts_with(prefix) { - let entry = self.prospective.entry(key.clone()).or_default(); + let entry = self.prospective.top.entry(key.clone()).or_default(); entry.value = None; if let Some(extrinsic) = extrinsic_index { @@ -134,8 +238,8 @@ impl OverlayedChanges { if self.committed.is_empty() { ::std::mem::swap(&mut self.prospective, &mut self.committed); } else { - for (key, val) in self.prospective.drain() { - let entry = self.committed.entry(key).or_default(); + for (key, val) in self.prospective.top.drain() { + let entry = self.committed.top.entry(key).or_default(); entry.value = val.value; if let Some(prospective_extrinsics) = val.extrinsics { @@ -143,16 +247,16 @@ impl OverlayedChanges { .extend(prospective_extrinsics); } } - } - } + for (storage_key, map) in self.prospective.children.drain() { + let entry = self.committed.children.entry(storage_key).or_default(); + entry.1.extend(map.1.iter().map(|(k, v)| (k.clone(), v.clone()))); - /// Drain committed changes to an iterator. - /// - /// Panics: - /// Will panic if there are any uncommitted prospective changes. - pub fn drain<'a>(&'a mut self) -> impl Iterator, OverlayedValue)> + 'a { - assert!(self.prospective.is_empty()); - self.committed.drain() + if let Some(prospective_extrinsics) = map.0 { + entry.0.get_or_insert_with(Default::default) + .extend(prospective_extrinsics); + } + } + } } /// Consume `OverlayedChanges` and take committed set. @@ -161,14 +265,14 @@ impl OverlayedChanges { /// Will panic if there are any uncommitted prospective changes. pub fn into_committed(self) -> impl Iterator, Option>)> { assert!(self.prospective.is_empty()); - self.committed.into_iter().map(|(k, v)| (k, v.value)) + self.committed.top.into_iter().map(|(k, v)| (k, v.value)) } /// Inserts storage entry responsible for current extrinsic index. 
#[cfg(test)] pub(crate) fn set_extrinsic_index(&mut self, extrinsic_index: u32) { use codec::Encode; - self.prospective.insert(EXTRINSIC_INDEX.to_vec(), OverlayedValue { + self.prospective.top.insert(EXTRINSIC_INDEX.to_vec(), OverlayedValue { value: Some(extrinsic_index.encode()), extrinsics: None, }); @@ -293,7 +397,7 @@ mod tests { digest_interval: 4, digest_levels: 1, }), true); assert_eq!( - strip_extrinsic_index(&overlay.prospective), + strip_extrinsic_index(&overlay.prospective.top), vec![ (vec![1], OverlayedValue { value: Some(vec![2]), extrinsics: Some(vec![0].into_iter().collect()) }), ].into_iter().collect(), @@ -329,7 +433,7 @@ mod tests { overlay.set_extrinsic_index(2); overlay.set_storage(vec![1], Some(vec![6])); - assert_eq!(strip_extrinsic_index(&overlay.prospective), + assert_eq!(strip_extrinsic_index(&overlay.prospective.top), vec![ (vec![1], OverlayedValue { value: Some(vec![6]), extrinsics: Some(vec![0, 2].into_iter().collect()) }), (vec![3], OverlayedValue { value: Some(vec![4]), extrinsics: Some(vec![1].into_iter().collect()) }), @@ -344,14 +448,14 @@ mod tests { overlay.set_extrinsic_index(4); overlay.set_storage(vec![1], Some(vec![8])); - assert_eq!(strip_extrinsic_index(&overlay.committed), + assert_eq!(strip_extrinsic_index(&overlay.committed.top), vec![ (vec![1], OverlayedValue { value: Some(vec![6]), extrinsics: Some(vec![0, 2].into_iter().collect()) }), (vec![3], OverlayedValue { value: Some(vec![4]), extrinsics: Some(vec![1].into_iter().collect()) }), (vec![100], OverlayedValue { value: Some(vec![101]), extrinsics: Some(vec![NO_EXTRINSIC_INDEX].into_iter().collect()) }), ].into_iter().collect()); - assert_eq!(strip_extrinsic_index(&overlay.prospective), + assert_eq!(strip_extrinsic_index(&overlay.prospective.top), vec![ (vec![1], OverlayedValue { value: Some(vec![8]), extrinsics: Some(vec![4].into_iter().collect()) }), (vec![3], OverlayedValue { value: Some(vec![7]), extrinsics: Some(vec![3].into_iter().collect()) }), @@ -359,7 +463,7 
@@ mod tests { overlay.commit_prospective(); - assert_eq!(strip_extrinsic_index(&overlay.committed), + assert_eq!(strip_extrinsic_index(&overlay.committed.top), vec![ (vec![1], OverlayedValue { value: Some(vec![8]), extrinsics: Some(vec![0, 2, 4].into_iter().collect()) }), (vec![3], OverlayedValue { value: Some(vec![7]), extrinsics: Some(vec![1, 3].into_iter().collect()) }), diff --git a/substrate/core/state-machine/src/proving_backend.rs b/substrate/core/state-machine/src/proving_backend.rs index d17eed7bc8..d8c9ae4695 100644 --- a/substrate/core/state-machine/src/proving_backend.rs +++ b/substrate/core/state-machine/src/proving_backend.rs @@ -20,7 +20,7 @@ use std::cell::RefCell; use hash_db::Hasher; use heapsize::HeapSizeOf; use hash_db::HashDB; -use trie::{TrieDB, Trie, Recorder, MemoryDB, TrieError}; +use trie::{Recorder, MemoryDB, TrieError, default_child_trie_root, read_trie_value_with, read_child_trie_value_with, record_all_keys}; use trie_backend::TrieBackend; use trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}; use {Error, ExecutionError, Backend}; @@ -47,10 +47,21 @@ impl<'a, S, H> ProvingBackendEssence<'a, S, H> let map_e = |e| format!("Trie lookup error: {}", e); - TrieDB::::new(&eph, self.backend.root()).map_err(map_e)? 
- .get_with(key, &mut *self.proof_recorder) - .map(|x| x.map(|val| val.to_vec())) - .map_err(map_e) + read_trie_value_with(&eph, self.backend.root(), key, &mut *self.proof_recorder).map_err(map_e) + } + + pub fn child_storage(&mut self, storage_key: &[u8], key: &[u8]) -> Result>, String> { + let root = self.storage(storage_key)?.unwrap_or(default_child_trie_root::(storage_key)); + + let mut read_overlay = MemoryDB::default(); + let eph = Ephemeral::new( + self.backend.backend_storage(), + &mut read_overlay, + ); + + let map_e = |e| format!("Trie lookup error: {}", e); + + read_child_trie_value_with(storage_key, &eph, &root, key, &mut *self.proof_recorder).map_err(map_e) } pub fn record_all_keys(&mut self) { @@ -62,20 +73,7 @@ impl<'a, S, H> ProvingBackendEssence<'a, S, H> let mut iter = move || -> Result<(), Box>> { let root = self.backend.root(); - let trie = TrieDB::::new(&eph, root)?; - let iter = trie.iter()?; - - for x in iter { - let (key, _) = x?; - - // there's currently no API like iter_with() - // => use iter to enumerate all keys AND lookup each - // key using get_with - trie.get_with(&key, &mut *self.proof_recorder) - .map(|x| x.map(|val| val.to_vec()))?; - } - - Ok(()) + record_all_keys::(&eph, root, &mut *self.proof_recorder) }; if let Err(e) = iter() { @@ -128,6 +126,18 @@ impl Backend for ProvingBackend }.storage(key) } + fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result>, Self::Error> { + ProvingBackendEssence { + backend: self.backend.essence(), + proof_recorder: &mut *self.proof_recorder.try_borrow_mut() + .expect("only fails when already borrowed; child_storage() is non-reentrant; qed"), + }.child_storage(storage_key, key) + } + + fn for_keys_in_child_storage(&self, storage_key: &[u8], f: F) { + self.backend.for_keys_in_child_storage(storage_key, f) + } + fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { self.backend.for_keys_with_prefix(prefix, f) } @@ -142,6 +152,14 @@ impl Backend for ProvingBackend 
self.backend.storage_root(delta) } + fn child_storage_root(&self, storage_key: &[u8], delta: I) -> (Vec, Self::Transaction) + where + I: IntoIterator, Option>)>, + H::Out: Ord + { + self.backend.child_storage_root(storage_key, delta) + } + fn try_into_trie_backend(self) -> Option> { None } @@ -211,7 +229,7 @@ mod tests { #[test] fn proof_recorded_and_checked() { - let contents = (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>(); + let contents = (0..64).map(|i| (None, vec![i], Some(vec![i]))).collect::>(); let in_memory = InMemory::::default(); let in_memory = in_memory.update(contents); let in_memory_root = in_memory.storage_root(::std::iter::empty()).0; diff --git a/substrate/core/state-machine/src/testing.rs b/substrate/core/state-machine/src/testing.rs index c26c31dda8..ea67773ed2 100644 --- a/substrate/core/state-machine/src/testing.rs +++ b/substrate/core/state-machine/src/testing.rs @@ -103,6 +103,10 @@ impl Externalities for TestExternalities where H::Out: Ord + He self.inner.get(key).map(|x| x.to_vec()) } + fn child_storage(&self, _storage_key: &[u8], _key: &[u8]) -> Option> { + None + } + fn place_storage(&mut self, key: Vec, maybe_value: Option>) { self.changes.set_storage(key.clone(), maybe_value.clone()); match maybe_value { @@ -111,6 +115,12 @@ impl Externalities for TestExternalities where H::Out: Ord + He } } + fn place_child_storage(&mut self, _storage_key: Vec, _key: Vec, _value: Option>) -> bool { + false + } + + fn kill_child_storage(&mut self, _storage_key: &[u8]) { } + fn clear_prefix(&mut self, prefix: &[u8]) { self.changes.clear_prefix(prefix); self.inner.retain(|key, _| !key.starts_with(prefix)); @@ -122,6 +132,10 @@ impl Externalities for TestExternalities where H::Out: Ord + He trie_root::(self.inner.clone()) } + fn child_storage_root(&mut self, _storage_key: &[u8]) -> Option> { + None + } + fn storage_changes_root(&mut self, block: u64) -> Option { compute_changes_trie_root::<_, _, H>( &InMemory::default(), diff --git 
a/substrate/core/state-machine/src/trie_backend.rs b/substrate/core/state-machine/src/trie_backend.rs index 0a4c2bb3ba..6f720a0f09 100644 --- a/substrate/core/state-machine/src/trie_backend.rs +++ b/substrate/core/state-machine/src/trie_backend.rs @@ -18,7 +18,7 @@ use hash_db::Hasher; use heapsize::HeapSizeOf; -use trie::{TrieDB, TrieDBMut, TrieError, Trie, TrieMut, MemoryDB}; +use trie::{TrieDB, TrieError, Trie, MemoryDB, delta_trie_root, default_child_trie_root, child_delta_trie_root}; use trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral}; use {Backend}; @@ -64,10 +64,18 @@ impl, H: Hasher> Backend for TrieBackend where self.essence.storage(key) } + fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result>, Self::Error> { + self.essence.child_storage(storage_key, key) + } + fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { self.essence.for_keys_with_prefix(prefix, f) } + fn for_keys_in_child_storage(&self, storage_key: &[u8], f: F) { + self.essence.for_keys_in_child_storage(storage_key, f) + } + fn pairs(&self) -> Vec<(Vec, Vec)> { let mut read_overlay = MemoryDB::default(); // TODO: use new for correctness let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); @@ -97,22 +105,45 @@ impl, H: Hasher> Backend for TrieBackend where { let mut write_overlay = MemoryDB::default(); let mut root = *self.essence.root(); + { let mut eph = Ephemeral::new( self.essence.backend_storage(), &mut write_overlay, ); - let mut trie = TrieDBMut::::from_existing(&mut eph, &mut root).expect("prior state root to exist"); // TODO: handle gracefully - for (key, change) in delta { - let result = match change { - Some(val) => trie.insert(&key, &val), - None => trie.remove(&key), // TODO: archive mode - }; + match delta_trie_root::(&mut eph, root, delta) { + Ok(ret) => root = ret, + Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), + } + } - if let Err(e) = result { - warn!(target: "trie", "Failed to write to trie: 
{}", e); - } + (root, write_overlay) + } + + fn child_storage_root(&self, storage_key: &[u8], delta: I) -> (Vec, Self::Transaction) + where + I: IntoIterator, Option>)>, + H::Out: Ord + { + let mut write_overlay = MemoryDB::default(); + let mut root = match self.storage(storage_key) { + Ok(value) => value.unwrap_or(default_child_trie_root::(storage_key)), + Err(e) => { + warn!(target: "trie", "Failed to read child storage root: {}", e); + default_child_trie_root::(storage_key) + }, + }; + + { + let mut eph = Ephemeral::new( + self.essence.backend_storage(), + &mut write_overlay, + ); + + match child_delta_trie_root::(storage_key, &mut eph, root.clone(), delta) { + Ok(ret) => root = ret, + Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), } } @@ -128,6 +159,7 @@ impl, H: Hasher> Backend for TrieBackend where pub mod tests { use std::collections::HashSet; use primitives::{Blake2Hasher, H256}; + use trie::{TrieMut, TrieDBMut}; use super::*; fn test_db() -> (MemoryDB, H256) { diff --git a/substrate/core/state-machine/src/trie_backend_essence.rs b/substrate/core/state-machine/src/trie_backend_essence.rs index 335b325cec..56424ae022 100644 --- a/substrate/core/state-machine/src/trie_backend_essence.rs +++ b/substrate/core/state-machine/src/trie_backend_essence.rs @@ -22,7 +22,7 @@ use std::ops::Deref; use std::sync::Arc; use hash_db::{self, Hasher}; use heapsize::HeapSizeOf; -use trie::{TrieDB, Trie, MemoryDB, DBValue, TrieError}; +use trie::{TrieDB, Trie, MemoryDB, DBValue, TrieError, default_child_trie_root, read_trie_value, read_child_trie_value, for_keys_in_child_trie}; use changes_trie::Storage as ChangesTrieStorage; /// Patricia trie-based storage trait. @@ -66,8 +66,43 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let map_e = |e| format!("Trie lookup error: {}", e); - TrieDB::::new(&eph, &self.root).map_err(map_e)? 
- .get(key).map(|x| x.map(|val| val.to_vec())).map_err(map_e) + read_trie_value(&eph, &self.root, key).map_err(map_e) + } + + /// Get the value of child storage at given key. + pub fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result>, String> { + let root = self.storage(storage_key)?.unwrap_or(default_child_trie_root::(storage_key)); + + let mut read_overlay = MemoryDB::default(); + let eph = Ephemeral { + storage: &self.storage, + overlay: &mut read_overlay, + }; + + let map_e = |e| format!("Trie lookup error: {}", e); + + read_child_trie_value(storage_key, &eph, &root, key).map_err(map_e) + } + + /// Retrieve all entries keys of child storage and call `f` for each of those keys. + pub fn for_keys_in_child_storage(&self, storage_key: &[u8], f: F) { + let root = match self.storage(storage_key) { + Ok(v) => v.unwrap_or(default_child_trie_root::(storage_key)), + Err(e) => { + debug!(target: "trie", "Error while iterating child storage: {}", e); + return; + } + }; + + let mut read_overlay = MemoryDB::default(); + let eph = Ephemeral { + storage: &self.storage, + overlay: &mut read_overlay, + }; + + if let Err(e) = for_keys_in_child_trie::(storage_key, &eph, &root, f) { + debug!(target: "trie", "Error while iterating child storage: {}", e); + } } /// Execute given closure for all keys starting with prefix. 
diff --git a/substrate/core/test-runtime/src/lib.rs b/substrate/core/test-runtime/src/lib.rs index 73a2a54670..de678e7205 100644 --- a/substrate/core/test-runtime/src/lib.rs +++ b/substrate/core/test-runtime/src/lib.rs @@ -54,7 +54,7 @@ use rstd::prelude::*; use codec::{Encode, Decode}; use runtime_api::runtime::*; -use runtime_primitives::traits::{BlindCheckable, BlakeTwo256, Block as BlockT}; +use runtime_primitives::traits::{BlindCheckable, BlakeTwo256, Block as BlockT, Extrinsic as ExtrinsicT}; use runtime_primitives::{ApplyResult, Ed25519Signature, transaction_validity::TransactionValidity}; use runtime_version::RuntimeVersion; pub use primitives::hash::H256; @@ -115,6 +115,12 @@ impl BlindCheckable for Extrinsic { } } +impl ExtrinsicT for Extrinsic { + fn is_signed(&self) -> Option { + Some(true) + } +} + /// An identifier for an account on this system. pub type AccountId = H256; /// A simple hash type for all our hashing. @@ -185,7 +191,7 @@ impl_apis! { } } - impl BlockBuilder for Runtime { + impl BlockBuilder for Runtime { fn initialise_block(header: ::Header) { system::initialise_block(header) } @@ -202,7 +208,7 @@ impl_apis! 
{ unimplemented!() } - fn check_inherents(_block: Block, _data: u32) -> Result<(), runtime_api::BlockBuilderError> { + fn check_inherents(_block: Block, _data: u32) -> Result<(), u32> { unimplemented!() } diff --git a/substrate/core/test-runtime/src/system.rs b/substrate/core/test-runtime/src/system.rs index 0fbbcdf4de..6bc70368a1 100644 --- a/substrate/core/test-runtime/src/system.rs +++ b/substrate/core/test-runtime/src/system.rs @@ -131,12 +131,12 @@ pub fn validate_transaction(utx: Extrinsic) -> TransactionValidity { p }; - TransactionValidity::Valid( - /* priority: */tx.amount, + TransactionValidity::Valid { + priority: tx.amount, requires, provides, - /* longevity: */64 - ) + longevity: 64 + } } diff --git a/substrate/core/test-runtime/wasm/Cargo.lock b/substrate/core/test-runtime/wasm/Cargo.lock index 3bc67944d2..b47c534a27 100644 --- a/substrate/core/test-runtime/wasm/Cargo.lock +++ b/substrate/core/test-runtime/wasm/Cargo.lock @@ -470,7 +470,6 @@ name = "sr-api" version = "0.1.0" dependencies = [ "parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-codec-derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 0.1.0", "sr-std 0.1.0", "sr-version 0.1.0", @@ -503,7 +502,6 @@ dependencies = [ "serde_derive 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 0.1.0", "sr-std 0.1.0", - "sr-version 0.1.0", "substrate-primitives 0.1.0", ] @@ -522,6 +520,7 @@ dependencies = [ "parity-codec-derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)", + "sr-primitives 0.1.0", "sr-std 0.1.0", ] diff --git a/substrate/core/transaction-pool/graph/src/base_pool.rs b/substrate/core/transaction-pool/graph/src/base_pool.rs index 9dba89fbec..21a52a9750 100644 --- a/substrate/core/transaction-pool/graph/src/base_pool.rs +++ 
b/substrate/core/transaction-pool/graph/src/base_pool.rs @@ -216,7 +216,7 @@ impl BasePool { } /// Returns an iterator over ready transactions in the pool. - pub fn ready<'a, 'b: 'a>(&'b self) -> impl Iterator>> + 'a { + pub fn ready(&self) -> impl Iterator>> { self.ready.get() } diff --git a/substrate/core/transaction-pool/graph/src/pool.rs b/substrate/core/transaction-pool/graph/src/pool.rs index d315425738..9f343c23d1 100644 --- a/substrate/core/transaction-pool/graph/src/pool.rs +++ b/substrate/core/transaction-pool/graph/src/pool.rs @@ -105,7 +105,7 @@ impl Pool { } match self.api.validate_transaction(at, &xt)? { - TransactionValidity::Valid(priority, requires, provides, longevity) => { + TransactionValidity::Valid { priority, requires, provides, longevity } => { Ok(base::Transaction { data: xt, hash, @@ -197,11 +197,12 @@ impl Pool { .ok_or_else(|| error::ErrorKind::Msg(format!("Invalid block id: {:?}", at)).into())? .as_(); let now = time::Instant::now(); - let to_remove = self.ready(|pending| pending - .filter(|tx| self.rotator.ban_if_stale(&now, block_number, &tx)) - .map(|tx| tx.hash.clone()) - .collect::>() - ); + let to_remove = { + self.ready() + .filter(|tx| self.rotator.ban_if_stale(&now, block_number, &tx)) + .map(|tx| tx.hash.clone()) + .collect::>() + }; let futures_to_remove: Vec> = { let p = self.pool.read(); let mut hashes = Vec::new(); @@ -266,20 +267,9 @@ impl Pool { invalid } - /// Get ready transactions ordered by priority - pub fn ready(&self, f: F) -> X where - F: FnOnce(&mut Iterator>) -> X, - { - let pool = self.pool.read(); - let mut ready = pool.ready(); - f(&mut ready) - } - - /// Returns all transactions in the pool. - /// - /// Be careful with large limit values, as querying the entire pool might be time consuming. 
- pub fn all(&self, limit: usize) -> Vec> { - self.ready(|it| it.take(limit).map(|ex| ex.data.clone()).collect()) + /// Get an iterator for ready transactions ordered by priority + pub fn ready(&self) -> impl Iterator> { + self.pool.read().ready() } /// Returns pool status. @@ -341,12 +331,12 @@ mod tests { if nonce < block_number { Ok(TransactionValidity::Invalid) } else { - Ok(TransactionValidity::Valid( - 4, - if nonce > block_number { vec![vec![nonce as u8 - 1]] } else { vec![] }, - vec![vec![nonce as u8]], - 3, - )) + Ok(TransactionValidity::Valid { + priority: 4, + requires: if nonce > block_number { vec![vec![nonce as u8 - 1]] } else { vec![] }, + provides: vec![vec![nonce as u8]], + longevity: 3, + }) } } @@ -398,7 +388,7 @@ mod tests { })).unwrap(); // then - assert_eq!(pool.ready(|pending| pending.map(|tx| tx.hash.clone()).collect::>()), vec![hash]); + assert_eq!(pool.ready().map(|v| v.hash).collect::>(), vec![hash]); } #[test] @@ -489,7 +479,7 @@ mod tests { pool.clear_stale(&BlockId::Number(5)).unwrap(); // then - assert_eq!(pool.all(3).len(), 0); + assert_eq!(pool.ready().count(), 0); assert_eq!(pool.status().future, 0); assert_eq!(pool.status().ready, 0); // make sure they are temporarily banned as well diff --git a/substrate/core/transaction-pool/graph/src/ready.rs b/substrate/core/transaction-pool/graph/src/ready.rs index 6df69c9a0c..47ab34c7fb 100644 --- a/substrate/core/transaction-pool/graph/src/ready.rs +++ b/substrate/core/transaction-pool/graph/src/ready.rs @@ -21,6 +21,7 @@ use std::{ sync::Arc, }; +use parking_lot::RwLock; use sr_primitives::traits::Member; use sr_primitives::transaction_validity::{ TransactionTag as Tag, @@ -79,6 +80,16 @@ struct ReadyTx { pub requires_offset: usize, } +impl Clone for ReadyTx { + fn clone(&self) -> Self { + ReadyTx { + transaction: self.transaction.clone(), + unlocks: self.unlocks.clone(), + requires_offset: self.requires_offset, + } + } +} + const HASH_READY: &str = r#" Every time transaction is imported 
its hash is placed in `ready` map and tags in `provided_tags`; Every time transaction is removed from the queue we remove the hash from `ready` map and from `provided_tags`; @@ -93,8 +104,7 @@ pub struct ReadyTransactions { /// tags that are provided by Ready transactions provided_tags: HashMap, /// Transactions that are ready (i.e. don't have any requirements external to the pool) - ready: HashMap>, - // ^^ TODO [ToDr] Consider wrapping this into `Arc>` and allow multiple concurrent iterators + ready: Arc>>>, /// Best transactions that are ready to be included to the block without any other previous transaction. best: BTreeSet>, } @@ -127,9 +137,9 @@ impl ReadyTransactions { /// - transactions that are valid for a shorter time go first /// 4. Lastly we sort by the time in the queue /// - transactions that are longer in the queue go first - pub fn get<'a>(&'a self) -> impl Iterator>> + 'a { + pub fn get(&self) -> impl Iterator>> { BestIterator { - all: &self.ready, + all: self.ready.clone(), best: self.best.clone(), awaiting: Default::default(), } @@ -144,7 +154,7 @@ impl ReadyTransactions { tx: WaitingTransaction, ) -> error::Result>>> { assert!(tx.is_ready(), "Only ready transactions can be imported."); - assert!(!self.ready.contains_key(&tx.transaction.hash), "Transaction is already imported."); + assert!(!self.ready.read().contains_key(&tx.transaction.hash), "Transaction is already imported."); self.insertion_id += 1; let insertion_id = self.insertion_id; @@ -154,11 +164,12 @@ impl ReadyTransactions { let replaced = self.replace_previous(&tx)?; let mut goes_to_best = true; + let mut ready = self.ready.write(); // Add links to transactions that unlock the current one for tag in &tx.requires { // Check if the transaction that satisfies the tag is still in the queue. 
if let Some(other) = self.provided_tags.get(tag) { - let mut tx = self.ready.get_mut(other).expect(HASH_READY); + let mut tx = ready.get_mut(other).expect(HASH_READY); tx.unlocks.push(hash.clone()); // this transaction depends on some other, so it doesn't go to best directly. goes_to_best = false; @@ -181,7 +192,7 @@ impl ReadyTransactions { } // insert to Ready - self.ready.insert(hash, ReadyTx { + ready.insert(hash, ReadyTx { transaction, unlocks: vec![], requires_offset: 0, @@ -192,7 +203,7 @@ impl ReadyTransactions { /// Returns true if given hash is part of the queue. pub fn contains(&self, hash: &Hash) -> bool { - self.ready.contains_key(hash) + self.ready.read().contains_key(hash) } /// Removes invalid transactions from the ready pool. @@ -204,13 +215,14 @@ impl ReadyTransactions { let mut removed = vec![]; let mut to_remove = hashes.iter().cloned().collect::>(); + let mut ready = self.ready.write(); loop { let hash = match to_remove.pop() { Some(hash) => hash, None => return removed, }; - if let Some(mut tx) = self.ready.remove(&hash) { + if let Some(mut tx) = ready.remove(&hash) { // remove entries from provided_tags for tag in &tx.transaction.transaction.provides { self.provided_tags.remove(tag); @@ -218,7 +230,7 @@ impl ReadyTransactions { // remove from unlocks for tag in &tx.transaction.transaction.requires { if let Some(hash) = self.provided_tags.get(tag) { - if let Some(tx) = self.ready.get_mut(hash) { + if let Some(tx) = ready.get_mut(hash) { remove_item(&mut tx.unlocks, &hash); } } @@ -253,7 +265,7 @@ impl ReadyTransactions { }; let res = self.provided_tags.remove(&tag) - .and_then(|hash| self.ready.remove(&hash)); + .and_then(|hash| self.ready.write().remove(&hash)); if let Some(tx) = res { let unlocks = tx.unlocks; @@ -262,9 +274,10 @@ impl ReadyTransactions { // prune previous transactions as well { let hash = &tx.hash; + let mut ready = self.ready.write(); let mut find_previous = |tag| -> Option> { let prev_hash = self.provided_tags.get(tag)?; 
- let tx2 = self.ready.get_mut(&prev_hash)?; + let tx2 = ready.get_mut(&prev_hash)?; remove_item(&mut tx2.unlocks, hash); // We eagerly prune previous transactions as well. // But it might not always be good. @@ -292,7 +305,7 @@ impl ReadyTransactions { // add the transactions that just got unlocked to `best` for hash in unlocks { - if let Some(tx) = self.ready.get_mut(&hash) { + if let Some(tx) = self.ready.write().get_mut(&hash) { tx.requires_offset += 1; // this transaction is ready if tx.requires_offset == tx.transaction.transaction.requires.len() { @@ -328,10 +341,13 @@ impl ReadyTransactions { } // now check if collective priority is lower than the replacement transaction. - let old_priority = replace_hashes - .iter() - .filter_map(|hash| self.ready.get(hash)) - .fold(0u64, |total, tx| total.saturating_add(tx.transaction.transaction.priority)); + let old_priority = { + let ready = self.ready.read(); + replace_hashes + .iter() + .filter_map(|hash| ready.get(hash)) + .fold(0u64, |total, tx| total.saturating_add(tx.transaction.transaction.priority)) + }; // bail - the transaction has too low priority to replace the old ones if old_priority >= tx.priority { @@ -349,7 +365,7 @@ impl ReadyTransactions { None => return Ok(removed), }; - let tx = self.ready.remove(&hash).expect(HASH_READY); + let tx = self.ready.write().remove(&hash).expect(HASH_READY); // check if this transaction provides stuff that is not provided by the new one. let (mut unlocks, tx) = (tx.unlocks, tx.transaction.transaction); { @@ -371,18 +387,18 @@ impl ReadyTransactions { /// Returns number of transactions in this queue. 
pub fn len(&self) -> usize { - self.ready.len() + self.ready.read().len() } } -pub struct BestIterator<'a, Hash: 'a, Ex: 'a> { - all: &'a HashMap>, +pub struct BestIterator { + all: Arc>>>, awaiting: HashMap)>, best: BTreeSet>, } -impl<'a, Hash: 'a + hash::Hash + Member, Ex: 'a> BestIterator<'a, Hash, Ex> { +impl BestIterator { /// Depending on number of satisfied requirements insert given ref /// either to awaiting set or to best set. fn best_or_awaiting(&mut self, satisfied: usize, tx_ref: TransactionRef) { @@ -397,32 +413,41 @@ impl<'a, Hash: 'a + hash::Hash + Member, Ex: 'a> BestIterator<'a, Hash, Ex> { } } -impl<'a, Hash: 'a + hash::Hash + Member, Ex: 'a> Iterator for BestIterator<'a, Hash, Ex> { +impl Iterator for BestIterator { type Item = Arc>; fn next(&mut self) -> Option { - let best = self.best.iter().next_back()?.clone(); - let best = self.best.take(&best)?; + loop { + let best = self.best.iter().next_back()?.clone(); + let best = self.best.take(&best)?; - let ready = match self.all.get(&best.transaction.hash) { - Some(ready) => ready, - // The transaction is not in all, maybe it was removed in the meantime? - None => return self.next(), - }; + let next = self.all.read().get(&best.transaction.hash).cloned(); + let ready = match next { + Some(ready) => ready, + // The transaction is not in all, maybe it was removed in the meantime? + None => continue, + }; - // Insert transactions that just got unlocked. - for hash in &ready.unlocks { - // first check local awaiting transactions - if let Some((mut satisfied, tx_ref)) = self.awaiting.remove(hash) { - satisfied += 1; - self.best_or_awaiting(satisfied, tx_ref); - // then get from the pool - } else if let Some(next) = self.all.get(hash) { - self.best_or_awaiting(next.requires_offset + 1, next.transaction.clone()); + // Insert transactions that just got unlocked. 
+ for hash in &ready.unlocks { + // first check local awaiting transactions + let res = if let Some((mut satisfied, tx_ref)) = self.awaiting.remove(hash) { + satisfied += 1; + Some((satisfied, tx_ref)) + // then get from the pool + } else if let Some(next) = self.all.read().get(hash) { + Some((next.requires_offset + 1, next.transaction.clone())) + } else { + None + }; + + if let Some((satisfied, tx_ref)) = res { + self.best_or_awaiting(satisfied, tx_ref) + } } - } - Some(best.transaction.clone()) + return Some(best.transaction.clone()) + } } } diff --git a/substrate/core/transaction-pool/src/tests.rs b/substrate/core/transaction-pool/src/tests.rs index f99ff8b903..e02bf05401 100644 --- a/substrate/core/transaction-pool/src/tests.rs +++ b/substrate/core/transaction-pool/src/tests.rs @@ -49,12 +49,12 @@ impl txpool::ChainApi for TestApi { }; let provides = vec![vec![uxt.transfer.nonce as u8]]; - Ok(TransactionValidity::Valid( - /* priority: */1, + Ok(TransactionValidity::Valid { + priority: 1, requires, provides, - /* longevity: */64 - )) + longevity: 64 + }) } fn block_id_to_number(&self, at: &BlockId) -> error::Result>> { @@ -109,7 +109,7 @@ fn submission_should_work() { assert_eq!(209, index(&BlockId::number(0))); pool.submit_one(&BlockId::number(0), uxt(Alice, 209)).unwrap(); - let pending: Vec<_> = pool.ready(|p| p.map(|a| a.data.transfer.nonce).collect()); + let pending: Vec<_> = pool.ready().map(|a| a.data.transfer.nonce).collect(); assert_eq!(pending, vec![209]); } @@ -119,7 +119,7 @@ fn multiple_submission_should_work() { pool.submit_one(&BlockId::number(0), uxt(Alice, 209)).unwrap(); pool.submit_one(&BlockId::number(0), uxt(Alice, 210)).unwrap(); - let pending: Vec<_> = pool.ready(|p| p.map(|a| a.data.transfer.nonce).collect()); + let pending: Vec<_> = pool.ready().map(|a| a.data.transfer.nonce).collect(); assert_eq!(pending, vec![209, 210]); } @@ -128,7 +128,7 @@ fn early_nonce_should_be_culled() { let pool = pool(); pool.submit_one(&BlockId::number(0), 
uxt(Alice, 208)).unwrap(); - let pending: Vec<_> = pool.ready(|p| p.map(|a| a.data.transfer.nonce).collect()); + let pending: Vec<_> = pool.ready().map(|a| a.data.transfer.nonce).collect(); assert_eq!(pending, Vec::::new()); } @@ -137,11 +137,11 @@ fn late_nonce_should_be_queued() { let pool = pool(); pool.submit_one(&BlockId::number(0), uxt(Alice, 210)).unwrap(); - let pending: Vec<_> = pool.ready(|p| p.map(|a| a.data.transfer.nonce).collect()); + let pending: Vec<_> = pool.ready().map(|a| a.data.transfer.nonce).collect(); assert_eq!(pending, Vec::::new()); pool.submit_one(&BlockId::number(0), uxt(Alice, 209)).unwrap(); - let pending: Vec<_> = pool.ready(|p| p.map(|a| a.data.transfer.nonce).collect()); + let pending: Vec<_> = pool.ready().map(|a| a.data.transfer.nonce).collect(); assert_eq!(pending, vec![209, 210]); } @@ -151,12 +151,12 @@ fn prune_tags_should_work() { pool.submit_one(&BlockId::number(0), uxt(Alice, 209)).unwrap(); pool.submit_one(&BlockId::number(0), uxt(Alice, 210)).unwrap(); - let pending: Vec<_> = pool.ready(|p| p.map(|a| a.data.transfer.nonce).collect()); + let pending: Vec<_> = pool.ready().map(|a| a.data.transfer.nonce).collect(); assert_eq!(pending, vec![209, 210]); pool.prune_tags(&BlockId::number(1), vec![vec![209]]).unwrap(); - let pending: Vec<_> = pool.ready(|p| p.map(|a| a.data.transfer.nonce).collect()); + let pending: Vec<_> = pool.ready().map(|a| a.data.transfer.nonce).collect(); assert_eq!(pending, vec![210]); } @@ -169,7 +169,7 @@ fn should_ban_invalid_transactions() { pool.submit_one(&BlockId::number(0), uxt.clone()).unwrap_err(); // when - let pending: Vec<_> = pool.ready(|p| p.map(|a| a.data.transfer.nonce).collect()); + let pending: Vec<_> = pool.ready().map(|a| a.data.transfer.nonce).collect(); assert_eq!(pending, Vec::::new()); // then diff --git a/substrate/core/trie/src/lib.rs b/substrate/core/trie/src/lib.rs index 754c6ae16c..87d955a357 100644 --- a/substrate/core/trie/src/lib.rs +++ b/substrate/core/trie/src/lib.rs @@ 
-45,7 +45,7 @@ pub use trie_stream::TrieStream; /// The Substrate format implementation of `NodeCodec`. pub use node_codec::NodeCodec; /// Various re-exports from the `trie-db` crate. -pub use trie_db::{Trie, TrieMut, DBValue, Recorder}; +pub use trie_db::{Trie, TrieMut, DBValue, Recorder, Query}; /// As in `trie_db`, but less generic, error type for the crate. pub type TrieError = trie_db::TrieError; @@ -53,7 +53,7 @@ pub type TrieError = trie_db::TrieError; pub trait AsHashDB: hash_db::AsHashDB {} impl> AsHashDB for T {} /// As in `hash_db`, but less generic, trait exposed. -pub type HashDB = hash_db::HashDB; +pub type HashDB<'a, H> = hash_db::HashDB + 'a; /// As in `memory_db`, but less generic, trait exposed. pub type MemoryDB = memory_db::MemoryDB; @@ -73,6 +73,36 @@ pub fn trie_root(input: I) -> H::Out where trie_root::trie_root::(input) } +/// Determine a trie root given a hash DB and delta values. +pub fn delta_trie_root(db: &mut HashDB, mut root: H::Out, delta: I) -> Result>> where + I: IntoIterator)>, + A: AsRef<[u8]> + Ord, + B: AsRef<[u8]>, +{ + { + let mut trie = TrieDBMut::::from_existing(db, &mut root)?; + + for (key, change) in delta { + match change { + Some(val) => trie.insert(key.as_ref(), val.as_ref())?, + None => trie.remove(key.as_ref())?, // TODO: archive mode + }; + } + } + + Ok(root) +} + +/// Read a value from the trie. +pub fn read_trie_value(db: &HashDB, root: &H::Out, key: &[u8]) -> Result>, Box>> { + Ok(TrieDB::::new(db, root)?.get(key).map(|x| x.map(|val| val.to_vec()))?) +} + +/// Read a value from the trie with given Query. +pub fn read_trie_value_with>(db: &HashDB, root: &H::Out, key: &[u8], query: Q) -> Result>, Box>> { + Ok(TrieDB::::new(db, root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) +} + /// Determine a trie root node's data given its ordered contents, closed form. pub fn unhashed_trie(input: I) -> Vec where I: IntoIterator, @@ -95,6 +125,102 @@ where ) } +/// Determine whether a child trie key is valid. 
`child_trie_root` and `child_delta_trie_root` can panic if invalid value is provided to them. +pub fn is_child_trie_key_valid(_storage_key: &[u8]) -> bool { + true +} + +/// Determine the default child trie root. +pub fn default_child_trie_root(_storage_key: &[u8]) -> Vec { + let mut db = MemoryDB::default(); + let mut root = H::Out::default(); + let mut empty = TrieDBMut::::new(&mut db, &mut root); + empty.commit(); + empty.root().as_ref().to_vec() +} + +/// Determine a child trie root given its ordered contents, closed form. H is the default hasher, but a generic +/// implementation may ignore this type parameter and use other hashers. +pub fn child_trie_root(_storage_key: &[u8], input: I) -> Vec where + I: IntoIterator, + A: AsRef<[u8]> + Ord, + B: AsRef<[u8]>, +{ + trie_root::(input).as_ref().iter().cloned().collect() +} + +/// Determine a child trie root given a hash DB and delta values. H is the default hasher, but a generic implementation may ignore this type parameter and use other hashers. +pub fn child_delta_trie_root(_storage_key: &[u8], db: &mut HashDB, root_vec: Vec, delta: I) -> Result, Box>> where + I: IntoIterator)>, + A: AsRef<[u8]> + Ord, + B: AsRef<[u8]>, +{ + let mut root = H::Out::default(); + root.as_mut().copy_from_slice(&root_vec); // root is fetched from DB, not writable by runtime, so it's always valid. + + { + let mut trie = TrieDBMut::::from_existing(db, &mut root)?; + + for (key, change) in delta { + match change { + Some(val) => trie.insert(key.as_ref(), val.as_ref())?, + None => trie.remove(key.as_ref())?, // TODO: archive mode + }; + } + } + + Ok(root.as_ref().to_vec()) +} + +/// Call `f` for all keys in a child trie. +pub fn for_keys_in_child_trie(_storage_key: &[u8], db: &HashDB, root_slice: &[u8], mut f: F) -> Result<(), Box>> { + let mut root = H::Out::default(); + root.as_mut().copy_from_slice(root_slice); // root is fetched from DB, not writable by runtime, so it's always valid. 
+ + let trie = TrieDB::::new(db, &root)?; + let iter = trie.iter()?; + + for x in iter { + let (key, _) = x?; + f(&key); + } + + Ok(()) +} + +/// Record all keys for a given root. +pub fn record_all_keys(db: &HashDB, root: &H::Out, recorder: &mut Recorder) -> Result<(), Box>> { + let trie = TrieDB::::new(db, root)?; + let iter = trie.iter()?; + + for x in iter { + let (key, _) = x?; + + // there's currently no API like iter_with() + // => use iter to enumerate all keys AND lookup each + // key using get_with + trie.get_with(&key, &mut *recorder)?; + } + + Ok(()) +} + +/// Read a value from the child trie. +pub fn read_child_trie_value(_storage_key: &[u8], db: &HashDB, root_slice: &[u8], key: &[u8]) -> Result>, Box>> { + let mut root = H::Out::default(); + root.as_mut().copy_from_slice(root_slice); // root is fetched from DB, not writable by runtime, so it's always valid. + + Ok(TrieDB::::new(db, &root)?.get(key).map(|x| x.map(|val| val.to_vec()))?) +} + +/// Read a value from the child trie with given query. +pub fn read_child_trie_value_with>(_storage_key: &[u8], db: &HashDB, root_slice: &[u8], key: &[u8], query: Q) -> Result>, Box>> { + let mut root = H::Out::default(); + root.as_mut().copy_from_slice(root_slice); // root is fetched from DB, not writable by runtime, so it's always valid. + + Ok(TrieDB::::new(db, &root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) 
+} + // Utilities (not exported): const EMPTY_TRIE: u8 = 0; @@ -418,4 +544,4 @@ mod tests { assert_eq!(pairs, iter_pairs); } -} \ No newline at end of file +} diff --git a/substrate/node/cli/Cargo.toml b/substrate/node/cli/Cargo.toml index 0b939bf01e..61b31a5f67 100644 --- a/substrate/node/cli/Cargo.toml +++ b/substrate/node/cli/Cargo.toml @@ -9,4 +9,16 @@ log = "0.4" tokio = "0.1.7" exit-future = "0.1" substrate-cli = { path = "../../core/cli" } -node-service = { path = "../service" } +substrate-primitives = { path = "../../core/primitives" } +node-runtime = { path = "../runtime" } +node-primitives = { path = "../primitives" } +node-network = { path = "../network" } +hex-literal = "0.1" +substrate-service = { path = "../../core/service" } +substrate-transaction-pool = { path = "../../core/transaction-pool" } +substrate-network = { path = "../../core/network" } +sr-primitives = { path = "../../core/sr-primitives" } +node-executor = { path = "../executor" } + +[dev-dependencies] +substrate-service-test = { path = "../../core/service/test" } diff --git a/substrate/node/service/res/bbq-birch.json b/substrate/node/cli/res/bbq-birch.json similarity index 100% rename from substrate/node/service/res/bbq-birch.json rename to substrate/node/cli/res/bbq-birch.json diff --git a/substrate/node/service/src/chain_spec.rs b/substrate/node/cli/src/chain_spec.rs similarity index 90% rename from substrate/node/service/src/chain_spec.rs rename to substrate/node/cli/src/chain_spec.rs index 7d11daa366..cc4612f27b 100644 --- a/substrate/node/service/src/chain_spec.rs +++ b/substrate/node/cli/src/chain_spec.rs @@ -20,11 +20,14 @@ use primitives::{AuthorityId, ed25519}; use node_runtime::{GenesisConfig, ConsensusConfig, CouncilSeatsConfig, CouncilVotingConfig, DemocracyConfig, SessionConfig, StakingConfig, TimestampConfig, BalancesConfig, TreasuryConfig, ContractConfig, Permill, Perbill}; -use service::ChainSpec; +use substrate_service; const STAGING_TELEMETRY_URL: &str = 
"wss://telemetry.polkadot.io/submit/"; -pub fn bbq_birch_config() -> Result, String> { +/// Specialised `ChainSpec`. +pub type ChainSpec = substrate_service::ChainSpec; + +pub fn bbq_birch_config() -> Result { ChainSpec::from_embedded(include_bytes!("../res/bbq-birch.json")) } @@ -122,7 +125,7 @@ fn staging_testnet_config_genesis() -> GenesisConfig { } /// Staging testnet config. -pub fn staging_testnet_config() -> ChainSpec { +pub fn staging_testnet_config() -> ChainSpec { let boot_nodes = vec![ ]; ChainSpec::from_genesis( @@ -227,7 +230,7 @@ fn development_config_genesis() -> GenesisConfig { } /// Development config (single validator Alice) -pub fn development_config() -> ChainSpec { +pub fn development_config() -> ChainSpec { ChainSpec::from_genesis("Development", "development", development_config_genesis, vec![], None, None, None) } @@ -238,18 +241,30 @@ fn local_testnet_genesis() -> GenesisConfig { ]) } -fn local_testnet_genesis_instant() -> GenesisConfig { - let mut genesis = local_testnet_genesis(); - genesis.timestamp = Some(TimestampConfig { period: 0 }); - genesis -} - /// Local testnet config (multivalidator Alice + Bob) -pub fn local_testnet_config() -> ChainSpec { +pub fn local_testnet_config() -> ChainSpec { ChainSpec::from_genesis("Local Testnet", "local_testnet", local_testnet_genesis, vec![], None, None, None) } -/// Local testnet config (multivalidator Alice + Bob) -pub fn integration_test_config() -> ChainSpec { - ChainSpec::from_genesis("Integration Test", "test", local_testnet_genesis_instant, vec![], None, None, None) +#[cfg(test)] +mod tests { + use super::*; + use service_test; + use service::Factory; + + fn local_testnet_genesis_instant() -> GenesisConfig { + let mut genesis = local_testnet_genesis(); + genesis.timestamp = Some(TimestampConfig { period: 0 }); + genesis + } + + /// Local testnet config (multivalidator Alice + Bob) + pub fn integration_test_config() -> ChainSpec { + ChainSpec::from_genesis("Integration Test", "test", 
local_testnet_genesis_instant, vec![], None, None, None) + } + + #[test] + fn test_connectivity() { + service_test::connectivity::(integration_test_config()); + } } diff --git a/substrate/node/cli/src/lib.rs b/substrate/node/cli/src/lib.rs index c83f8bb8fa..0d8c9c4d71 100644 --- a/substrate/node/cli/src/lib.rs +++ b/substrate/node/cli/src/lib.rs @@ -22,17 +22,32 @@ extern crate tokio; extern crate substrate_cli as cli; -extern crate node_service as service; +extern crate substrate_primitives as primitives; +extern crate node_runtime; extern crate exit_future; +#[macro_use] +extern crate hex_literal; +#[cfg(test)] +extern crate substrate_service_test as service_test; +extern crate substrate_transaction_pool as transaction_pool; +extern crate substrate_network as network; +extern crate node_network; +extern crate sr_primitives as runtime_primitives; +extern crate node_primitives; +#[macro_use] +extern crate substrate_service; +extern crate node_executor; #[macro_use] extern crate log; pub use cli::error; +mod chain_spec; +mod service; use tokio::runtime::Runtime; -pub use service::{Components as ServiceComponents, Service, CustomConfiguration, ServiceFactory}; pub use cli::{VersionInfo, IntoExit}; +use substrate_service::{ServiceFactory, Roles as ServiceRoles}; /// The chain specification option. #[derive(Clone, Debug)] @@ -49,12 +64,12 @@ pub enum ChainSpec { /// Get a chain config from a spec setting. 
impl ChainSpec { - pub(crate) fn load(self) -> Result { + pub(crate) fn load(self) -> Result { Ok(match self { - ChainSpec::BbqBirch => service::chain_spec::bbq_birch_config()?, - ChainSpec::Development => service::chain_spec::development_config(), - ChainSpec::LocalTestnet => service::chain_spec::local_testnet_config(), - ChainSpec::StagingTestnet => service::chain_spec::staging_testnet_config(), + ChainSpec::BbqBirch => chain_spec::bbq_birch_config()?, + ChainSpec::Development => chain_spec::development_config(), + ChainSpec::LocalTestnet => chain_spec::local_testnet_config(), + ChainSpec::StagingTestnet => chain_spec::staging_testnet_config(), }) } @@ -69,7 +84,7 @@ impl ChainSpec { } } -fn load_spec(id: &str) -> Result, String> { +fn load_spec(id: &str) -> Result, String> { Ok(match ChainSpec::from(id) { Some(spec) => Some(spec.load()?), None => None, @@ -93,7 +108,7 @@ pub fn run(args: I, exit: E, version: cli::VersionInfo) -> error::Resul info!("Roles: {:?}", config.roles); let mut runtime = Runtime::new()?; let executor = runtime.executor(); - match config.roles == service::Roles::LIGHT { + match config.roles == ServiceRoles::LIGHT { true => run_until_exit(&mut runtime, service::Factory::new_light(config, executor)?, exit)?, false => run_until_exit(&mut runtime, service::Factory::new_full(config, executor)?, exit)?, } @@ -108,7 +123,7 @@ fn run_until_exit( e: E, ) -> error::Result<()> where - C: service::Components, + C: substrate_service::Components, E: IntoExit, { let (exit_send, exit) = exit_future::signal(); diff --git a/substrate/node/cli/src/service.rs b/substrate/node/cli/src/service.rs new file mode 100644 index 0000000000..4e718816e1 --- /dev/null +++ b/substrate/node/cli/src/service.rs @@ -0,0 +1,133 @@ +// Copyright 2018 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +#![warn(unused_extern_crates)] + +//! Service and ServiceFactory implementation. Specialized wrapper over substrate service. + +use std::sync::Arc; +use transaction_pool::{self, txpool::{Pool as TransactionPool}}; +use node_primitives::Block; +use node_runtime::GenesisConfig; +use node_network::Protocol as NodeProtocol; +use substrate_service::{ + FactoryFullConfiguration, LightComponents, FullComponents, FullBackend, + LightBackend, FullExecutor, LightExecutor +}; +use network::import_queue::{BasicQueue, BlockOrigin, ImportBlock, Verifier}; +use runtime_primitives::{traits::Block as BlockT}; +use primitives::AuthorityId; +use node_executor; + +// TODO: Remove me, when we have a functional consensus. +/// A verifier that doesn't actually do any checks +pub struct NoneVerifier; +/// This Verifiyer accepts all data as valid +impl Verifier for NoneVerifier { + fn verify( + &self, + origin: BlockOrigin, + header: B::Header, + justification: Vec, + body: Option> + ) -> Result<(ImportBlock, Option>), String> { + Ok((ImportBlock { + origin, + header, + body, + finalized: true, + external_justification: justification, + internal_justification: vec![], + auxiliary: Vec::new(), + }, None)) + } +} + +construct_simple_service!(Service); + +construct_service_factory! 
{ + struct Factory { + Block = Block, + NetworkProtocol = NodeProtocol { |config| Ok(NodeProtocol::new()) }, + RuntimeDispatch = node_executor::Executor, + FullTransactionPoolApi = transaction_pool::ChainApi, FullExecutor, Block> + { |config, client| Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) }, + LightTransactionPoolApi = transaction_pool::ChainApi, LightExecutor, Block> + { |config, client| Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) }, + Genesis = GenesisConfig, + Configuration = (), + FullService = Service> + { |config, executor| Service::>::new(config, executor) }, + LightService = Service> + { |config, executor| Service::>::new(config, executor) }, + ImportQueue = BasicQueue + { |_, _| Ok(BasicQueue::new(Arc::new(NoneVerifier {}))) } + { |_, _| Ok(BasicQueue::new(Arc::new(NoneVerifier {}))) }, + } +} + +#[cfg(test)] +mod tests { + #[cfg(feature = "rhd")] + fn test_sync() { + use {service_test, Factory}; + use client::{ImportBlock, BlockOrigin}; + + let alice: Arc = Arc::new(Keyring::Alice.into()); + let bob: Arc = Arc::new(Keyring::Bob.into()); + let validators = vec![alice.public().0.into(), bob.public().0.into()]; + let keys: Vec<&ed25519::Pair> = vec![&*alice, &*bob]; + let offline = Arc::new(RwLock::new(OfflineTracker::new())); + let dummy_runtime = ::tokio::runtime::Runtime::new().unwrap(); + let block_factory = |service: &::FullService| { + let block_id = BlockId::number(service.client().info().unwrap().chain.best_number); + let parent_header = service.client().header(&block_id).unwrap().unwrap(); + let consensus_net = ConsensusNetwork::new(service.network(), service.client().clone()); + let proposer_factory = consensus::ProposerFactory { + client: service.client().clone(), + transaction_pool: service.transaction_pool().clone(), + network: consensus_net, + offline: offline.clone(), + force_delay: 0, + handle: dummy_runtime.executor(), + }; + let (proposer, _, _) = 
proposer_factory.init(&parent_header, &validators, alice.clone()).unwrap(); + let block = proposer.propose().expect("Error making test block"); + ImportBlock { + origin: BlockOrigin::File, + external_justification: Vec::new(), + internal_justification: Vec::new(), + finalized: true, + body: Some(block.extrinsics), + header: block.header, + auxiliary: Vec::new(), + } + }; + let extrinsic_factory = |service: &::FullService| { + let payload = (0, Call::Balances(BalancesCall::transfer(RawAddress::Id(bob.public().0.into()), 69.into())), Era::immortal(), service.client().genesis_hash()); + let signature = alice.sign(&payload.encode()).into(); + let id = alice.public().0.into(); + let xt = UncheckedExtrinsic { + signature: Some((RawAddress::Id(id), signature, payload.0, Era::immortal())), + function: payload.1, + }.encode(); + let v: Vec = Decode::decode(&mut xt.as_slice()).unwrap(); + OpaqueExtrinsic(v) + }; + service_test::sync::(chain_spec::integration_test_config(), block_factory, extrinsic_factory); + } + +} diff --git a/substrate/node/consensus/src/lib.rs b/substrate/node/consensus/src/lib.rs index e80a668ced..9638a39715 100644 --- a/substrate/node/consensus/src/lib.rs +++ b/substrate/node/consensus/src/lib.rs @@ -48,10 +48,10 @@ use std::sync::Arc; use std::time::{self, Duration, Instant}; use client::{Client as SubstrateClient, CallExecutor}; -use client::runtime_api::{Core, BlockBuilder as BlockBuilderAPI, Miscellaneous, OldTxQueue, BlockBuilderError}; +use client::runtime_api::{Core, BlockBuilder as BlockBuilderAPI, Miscellaneous, OldTxQueue}; use codec::{Decode, Encode}; -use node_primitives::{AccountId, Timestamp, SessionKey, InherentData}; -use node_runtime::Runtime; +use node_primitives::{AccountId, Timestamp, SessionKey}; +use node_runtime::{Runtime, InherentError, TimestampInherentError, InherentData}; use primitives::{AuthorityId, ed25519, Blake2Hasher}; use runtime_primitives::traits::{Block as BlockT, Hash as HashT, Header as HeaderT, As, 
BlockNumberToHash}; use runtime_primitives::generic::{BlockId, Era}; @@ -135,9 +135,8 @@ impl<'a, B, E, Block> AuthoringApi for SubstrateClient where let mut block_builder = self.new_block_at(at)?; if runtime_version.has_api(*b"inherent", 1) { - for inherent in self.inherent_extrinsics(at, &inherent_data)? { - block_builder.push(inherent)?; - } + self.inherent_extrinsics(at, &inherent_data)? + .into_iter().try_for_each(|i| block_builder.push(i))?; } build_ctx(&mut block_builder); @@ -383,7 +382,7 @@ impl bft::Proposer<::Block> for Proposer where &inherent ) { Ok(Ok(())) => None, - Ok(Err(BlockBuilderError::TimestampInFuture(timestamp))) => Some(timestamp), + Ok(Err(InherentError::Timestamp(TimestampInherentError::TimestampInFuture(timestamp)))) => Some(timestamp), Ok(Err(e)) => { debug!(target: "bft", "Invalid proposal (check_inherents): {:?}", e); return Box::new(future::ok(false)); diff --git a/substrate/node/primitives/src/lib.rs b/substrate/node/primitives/src/lib.rs index c6802c7164..2e436e31de 100644 --- a/substrate/node/primitives/src/lib.rs +++ b/substrate/node/primitives/src/lib.rs @@ -36,7 +36,7 @@ use rstd::prelude::*; use runtime_primitives::generic; #[cfg(feature = "std")] use primitives::bytes; -use runtime_primitives::traits::BlakeTwo256; +use runtime_primitives::traits::{BlakeTwo256, self}; /// An index to a block. pub type BlockNumber = u64; @@ -79,22 +79,9 @@ pub type BlockId = generic::BlockId; #[derive(PartialEq, Eq, Clone, Default, Encode, Decode)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug))] pub struct UncheckedExtrinsic(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); -/// -/// Inherent data to include in a block. -#[derive(Encode, Decode)] -pub struct InherentData { - /// Current timestamp. - pub timestamp: Timestamp, - /// Indices of offline validators. - pub offline_indices: Vec, -} -impl InherentData { - /// Create a new `InherentData` instance. 
- pub fn new(timestamp: Timestamp, offline_indices: Vec) -> Self { - Self { - timestamp, - offline_indices - } +impl traits::Extrinsic for UncheckedExtrinsic { + fn is_signed(&self) -> Option { + None } } diff --git a/substrate/node/runtime/src/lib.rs b/substrate/node/runtime/src/lib.rs index 45b5db63ad..36dcd347b9 100644 --- a/substrate/node/runtime/src/lib.rs +++ b/substrate/node/runtime/src/lib.rs @@ -40,7 +40,6 @@ extern crate substrate_primitives; #[macro_use] extern crate parity_codec_derive; -#[cfg_attr(not(feature = "std"), macro_use)] extern crate sr_std as rstd; extern crate srml_balances as balances; extern crate srml_consensus as consensus; @@ -61,13 +60,13 @@ use rstd::prelude::*; use substrate_primitives::u32_trait::{_2, _4}; use node_primitives::{ AccountId, AccountIndex, Balance, BlockNumber, Hash, Index, - SessionKey, Signature, InherentData, Timestamp as TimestampType + SessionKey, Signature }; -use runtime_api::{BlockBuilderError, runtime::*}; +use runtime_api::runtime::*; use runtime_primitives::ApplyResult; use runtime_primitives::transaction_validity::TransactionValidity; use runtime_primitives::generic; -use runtime_primitives::traits::{Convert, BlakeTwo256, DigestItem, Block as BlockT}; +use runtime_primitives::traits::{Convert, BlakeTwo256, Block as BlockT}; use version::{RuntimeVersion, ApiId}; use council::{motions as council_motions, voting as council_voting}; #[cfg(feature = "std")] @@ -192,40 +191,24 @@ impl contract::Trait for Runtime { type Event = Event; } -impl DigestItem for Log { - type Hash = Hash; - type AuthorityId = SessionKey; - - fn as_authorities_change(&self) -> Option<&[Self::AuthorityId]> { - match self.0 { - InternalLog::consensus(ref item) => item.as_authorities_change(), - _ => None, - } - } - - fn as_changes_trie_root(&self) -> Option<&Self::Hash> { - match self.0 { - InternalLog::system(ref item) => item.as_changes_trie_root(), - _ => None, - } - } -} - construct_runtime!( - pub enum Runtime with Log(InternalLog: 
DigestItem) { + pub enum Runtime with Log(InternalLog: DigestItem) where + Block = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { System: system::{default, Log(ChangesTrieRoot)}, - Consensus: consensus::{Module, Call, Storage, Config, Log(AuthoritiesChange)}, + Consensus: consensus::{Module, Call, Storage, Config, Log(AuthoritiesChange), Inherent}, Balances: balances, - Timestamp: timestamp::{Module, Call, Storage, Config}, + Timestamp: timestamp::{Module, Call, Storage, Config, Inherent}, Session: session, Staking: staking, Democracy: democracy, Council: council::{Module, Call, Storage, Event}, CouncilVoting: council_voting, CouncilMotions: council_motions::{Module, Call, Storage, Event, Origin}, - CouncilSeats: council_seats::{Config}, + CouncilSeats: council_seats::{Config}, Treasury: treasury, - Contract: contract::{Module, Call, Config, Event}, + Contract: contract::{Module, Call, Config, Event}, } ); @@ -269,7 +252,7 @@ impl_apis! { } } - impl BlockBuilder for Runtime { + impl BlockBuilder for Runtime { fn initialise_block(header: ::Header) { Executive::initialise_block(&header) } @@ -283,49 +266,11 @@ impl_apis! { } fn inherent_extrinsics(data: InherentData) -> Vec { - let mut inherent = vec![generic::UncheckedMortalExtrinsic::new_unsigned( - Call::Timestamp(TimestampCall::set(data.timestamp.into())) - )]; - - if !data.offline_indices.is_empty() { - inherent.push(generic::UncheckedMortalExtrinsic::new_unsigned( - Call::Consensus(ConsensusCall::note_offline(data.offline_indices)) - )); - } - - inherent + data.create_inherent_extrinsics() } - fn check_inherents(block: Block, data: InherentData) -> Result<(), BlockBuilderError> { - // TODO: v1: should be automatically gathered - - // Timestamp module... 
- const MAX_TIMESTAMP_DRIFT: TimestampType = 60; - let xt = block.extrinsics.get(TIMESTAMP_SET_POSITION as usize) - .ok_or_else(|| BlockBuilderError::Generic("No valid timestamp inherent in block".into()))?; - let t = match (xt.is_signed(), &xt.function) { - (false, Call::Timestamp(TimestampCall::set(t))) => t, - _ => return Err(BlockBuilderError::Generic("No valid timestamp inherent in block".into())), - }; - let t = (*t).into(); - if t > data.timestamp + MAX_TIMESTAMP_DRIFT { - return Err(BlockBuilderError::TimestampInFuture(t)) - } - - // Offline indices - let noted_offline = - block.extrinsics.get(NOTE_OFFLINE_POSITION as usize).and_then(|xt| match xt.function { - Call::Consensus(ConsensusCall::note_offline(ref x)) => Some(&x[..]), - _ => None, - }).unwrap_or(&[]); - - noted_offline.iter().try_for_each(|n| - if !data.offline_indices.contains(n) { - Err(BlockBuilderError::Generic("Online node marked offline".into())) - } else { - Ok(()) - } - ) + fn check_inherents(block: Block, data: InherentData) -> Result<(), InherentError> { + data.check_inherents(block) } fn random_seed() -> ::Hash { diff --git a/substrate/node/runtime/wasm/Cargo.lock b/substrate/node/runtime/wasm/Cargo.lock index 7a655c6415..fc45e6f94c 100644 --- a/substrate/node/runtime/wasm/Cargo.lock +++ b/substrate/node/runtime/wasm/Cargo.lock @@ -530,7 +530,6 @@ name = "sr-api" version = "0.1.0" dependencies = [ "parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-codec-derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "sr-primitives 0.1.0", "sr-std 0.1.0", "sr-version 0.1.0", @@ -563,7 +562,6 @@ dependencies = [ "serde_derive 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 0.1.0", "sr-std 0.1.0", - "sr-version 0.1.0", "substrate-primitives 0.1.0", ] @@ -593,6 +591,7 @@ dependencies = [ "parity-codec-derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.79 
(registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)", + "sr-primitives 0.1.0", "sr-std 0.1.0", ] @@ -788,6 +787,7 @@ version = "0.1.0" dependencies = [ "hex-literal 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-codec-derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)", "sr-io 0.1.0", diff --git a/substrate/node/runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm b/substrate/node/runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm new file mode 100644 index 0000000000..392b360796 Binary files /dev/null and b/substrate/node/runtime/wasm/target/wasm32-unknown-unknown/release/node_runtime.compact.wasm differ diff --git a/substrate/node/service/Cargo.toml b/substrate/node/service/Cargo.toml deleted file mode 100644 index 2ef8ae5a12..0000000000 --- a/substrate/node/service/Cargo.toml +++ /dev/null @@ -1,33 +0,0 @@ -[package] -name = "node-service" -version = "0.1.0" -authors = ["Parity Technologies "] - -[dependencies] -error-chain = "0.12" -hex-literal = "0.1" -lazy_static = "1.0" -log = "0.4" -node-consensus = { path = "../consensus" } -node-executor = { path = "../executor" } -node-network = { path = "../network" } -node-primitives = { path = "../primitives" } -node-runtime = { path = "../runtime" } -parity-codec = { version = "2.1" } -parking_lot = "0.4" -slog = "^2" -sr-io = { path = "../../core/sr-io" } -sr-primitives = { path = "../../core/sr-primitives" } -substrate-client = { path = "../../core/client" } -substrate-network = { path = "../../core/network" } -substrate-primitives = { path = "../../core/primitives" } -substrate-service = { path = 
"../../core/service" } -substrate-telemetry = { path = "../../core/telemetry" } -substrate-transaction-pool = { path = "../../core/transaction-pool" } -tokio = "0.1.7" - -[dev-dependencies] -substrate-service-test = { path = "../../core/service/test" } -substrate-test-client = { path = "../../core/test-client" } -substrate-keyring = { path = "../../core/keyring" } -rhododendron = "0.3" diff --git a/substrate/node/service/src/lib.rs b/substrate/node/service/src/lib.rs deleted file mode 100644 index dc799ba4dd..0000000000 --- a/substrate/node/service/src/lib.rs +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2018 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -#![warn(unused_extern_crates)] - -//! Substrate service. Specialized wrapper over substrate service. 
- -extern crate node_primitives; -extern crate node_runtime; -extern crate node_executor; -extern crate node_network; -extern crate substrate_client as client; -extern crate substrate_network as network; -extern crate substrate_primitives as primitives; -extern crate substrate_service as service; -extern crate substrate_transaction_pool as transaction_pool; -extern crate tokio; -#[cfg(test)] -extern crate substrate_service_test as service_test; - -#[macro_use] -extern crate hex_literal; -#[cfg(all(test, feature="rhd"))] -extern crate rhododendron as rhd; -extern crate sr_primitives as runtime_primitives; -pub mod chain_spec; - -use std::sync::Arc; -use transaction_pool::txpool::{Pool as TransactionPool}; -use node_primitives::{Block, Hash}; -use node_runtime::GenesisConfig; -use client::Client; -use node_network::Protocol as DemoProtocol; -use tokio::runtime::TaskExecutor; -use service::FactoryFullConfiguration; -use network::import_queue::{BasicQueue, BlockOrigin, ImportBlock, Verifier}; -use runtime_primitives::{traits::Block as BlockT}; -use primitives::{Blake2Hasher, AuthorityId}; - -pub use service::{Roles, PruningMode, TransactionPoolOptions, ServiceFactory, - ErrorKind, Error, ComponentBlock, LightComponents, FullComponents}; -pub use client::ExecutionStrategy; - -/// Specialised `ChainSpec`. -pub type ChainSpec = service::ChainSpec; -/// Client type for specialised `Components`. 
-pub type ComponentClient = Client<::Backend, ::Executor, Block>; -pub type NetworkService = network::Service::NetworkProtocol, Hash>; - -/// A verifier that doesn't actually do any checks -pub struct NoneVerifier; -/// This Verifiyer accepts all data as valid -impl Verifier for NoneVerifier { - fn verify( - &self, - origin: BlockOrigin, - header: B::Header, - justification: Vec, - body: Option> - ) -> Result<(ImportBlock, Option>), String> { - Ok((ImportBlock { - origin, - header, - body, - finalized: true, - external_justification: justification, - internal_justification: vec![], - auxiliary: Vec::new(), - }, None)) - } -} - -/// A collection of type to generalise specific components over full / light client. -pub trait Components: service::Components { - /// Demo API. - type Api: 'static + Send + Sync; - /// Client backend. - type Backend: 'static + client::backend::Backend; - /// Client executor. - type Executor: 'static + client::CallExecutor + Send + Sync; -} - -impl Components for service::LightComponents { - type Api = service::LightClient; - type Executor = service::LightExecutor; - type Backend = service::LightBackend; -} - -impl Components for service::FullComponents { - type Api = service::FullClient; - type Executor = service::FullExecutor; - type Backend = service::FullBackend; -} - -/// All configuration for the node. -pub type Configuration = FactoryFullConfiguration; - -/// Demo-specific configuration. -#[derive(Default)] -pub struct CustomConfiguration; - -/// Config for the substrate service. 
-pub struct Factory; - -impl service::ServiceFactory for Factory { - type Block = Block; - type ExtrinsicHash = Hash; - type NetworkProtocol = DemoProtocol; - type RuntimeDispatch = node_executor::Executor; - type FullTransactionPoolApi = transaction_pool::ChainApi, service::FullExecutor, Block>; - type LightTransactionPoolApi = transaction_pool::ChainApi, service::LightExecutor, Block>; - type Genesis = GenesisConfig; - type Configuration = CustomConfiguration; - type FullService = Service>; - type LightService = Service>; - /// instance of import queue for clients - type ImportQueue = BasicQueue; - - fn build_full_transaction_pool(config: TransactionPoolOptions, client: Arc>) - -> Result, Error> - { - Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) - } - - fn build_light_transaction_pool(config: TransactionPoolOptions, client: Arc>) - -> Result, Error> - { - Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) - } - - fn build_network_protocol(_config: &Configuration) - -> Result - { - Ok(DemoProtocol::new()) - } - - fn build_full_import_queue( - _config: &FactoryFullConfiguration, - _client: Arc>, - ) -> Result, service::Error> { - Ok(BasicQueue::new(Arc::new(NoneVerifier {}))) - } - - fn build_light_import_queue( - _config: &FactoryFullConfiguration, - _client: Arc>, - ) -> Result, service::Error> { - Ok(BasicQueue::new(Arc::new(NoneVerifier {}))) - } - - fn new_light(config: Configuration, executor: TaskExecutor) - -> Result>, Error> - { - let service = service::Service::>::new(config, executor.clone())?; - Ok(Service { - inner: service, - _consensus: None, - }) - } - - fn new_full(config: Configuration, executor: TaskExecutor) - -> Result>, Error> - { - let service = service::Service::>::new(config, executor.clone())?; - // FIXME: Spin consensus service if configured - let consensus = None; - Ok(Service { - inner: service, - _consensus: consensus, - }) - } -} -/// Demo service. 
-pub struct Service { - inner: service::Service, - _consensus: Option, // FIXME: add actual consensus engine -} - -impl ::std::ops::Deref for Service { - type Target = service::Service; - fn deref(&self) -> &Self::Target { - &self.inner - } -} - - -/// Creates bare client without any networking. -pub fn new_client(config: Configuration) - -> Result>>, Error> -{ - service::new_client::(&config) -} - -#[cfg(test)] -mod tests { - use {service_test, Factory, chain_spec}; - - #[test] - fn test_connectivity() { - service_test::connectivity::(chain_spec::integration_test_config()); - } - - #[test] - #[cfg(feature = "rhd")] - fn test_sync() { - use client::{ImportBlock, BlockOrigin}; - - let alice: Arc = Arc::new(Keyring::Alice.into()); - let bob: Arc = Arc::new(Keyring::Bob.into()); - let validators = vec![alice.public().0.into(), bob.public().0.into()]; - let keys: Vec<&ed25519::Pair> = vec![&*alice, &*bob]; - let offline = Arc::new(RwLock::new(OfflineTracker::new())); - let dummy_runtime = ::tokio::runtime::Runtime::new().unwrap(); - let block_factory = |service: &::FullService| { - let block_id = BlockId::number(service.client().info().unwrap().chain.best_number); - let parent_header = service.client().header(&block_id).unwrap().unwrap(); - let consensus_net = ConsensusNetwork::new(service.network(), service.client().clone()); - let proposer_factory = consensus::ProposerFactory { - client: service.client().clone(), - transaction_pool: service.transaction_pool().clone(), - network: consensus_net, - offline: offline.clone(), - force_delay: 0, - handle: dummy_runtime.executor(), - }; - let (proposer, _, _) = proposer_factory.init(&parent_header, &validators, alice.clone()).unwrap(); - let block = proposer.propose().expect("Error making test block"); - ImportBlock { - origin: BlockOrigin::File, - external_justification: Vec::new(), - internal_justification: Vec::new(), - finalized: true, - body: Some(block.extrinsics), - header: block.header, - auxiliary: Vec::new(), - } - 
}; - let extrinsic_factory = |service: &::FullService| { - let payload = (0, Call::Balances(BalancesCall::transfer(RawAddress::Id(bob.public().0.into()), 69.into())), Era::immortal(), service.client().genesis_hash()); - let signature = alice.sign(&payload.encode()).into(); - let id = alice.public().0.into(); - let xt = UncheckedExtrinsic { - signature: Some((RawAddress::Id(id), signature, payload.0, Era::immortal())), - function: payload.1, - }.encode(); - let v: Vec = Decode::decode(&mut xt.as_slice()).unwrap(); - OpaqueExtrinsic(v) - }; - service_test::sync::(chain_spec::integration_test_config(), block_factory, extrinsic_factory); - } - -} diff --git a/substrate/scripts/getgoing.sh b/substrate/scripts/getgoing.sh new file mode 100644 index 0000000000..98f360837d --- /dev/null +++ b/substrate/scripts/getgoing.sh @@ -0,0 +1,6 @@ +/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" +brew install openssl cmake +curl https://sh.rustup.rs -sSf | sh +source ~/.cargo/env +cargo install --git https://github.com/paritytech/substrate subkey +cargo install --git https://github.com/paritytech/substrate substrate diff --git a/substrate/srml/assets/Cargo.toml b/substrate/srml/assets/Cargo.toml new file mode 100644 index 0000000000..58d84fb9bb --- /dev/null +++ b/substrate/srml/assets/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "srml-assets" +version = "0.1.0" +authors = ["Parity Technologies "] + +[dependencies] +hex-literal = "0.1.0" +serde = { version = "1.0", default-features = false } +serde_derive = { version = "1.0", optional = true } +parity-codec = { version = "2.1", default-features = false } +parity-codec-derive = { version = "2.1", default-features = false } +substrate-primitives = { path = "../../core/primitives", default-features = false } +sr-std = { path = "../../core/sr-std", default-features = false } +sr-io = { path = "../../core/sr-io", default-features = false } +sr-primitives = { path = 
"../../core/sr-primitives", default-features = false } +srml-support = { path = "../support", default-features = false } +srml-system = { path = "../system", default-features = false } + +[features] +default = ["std"] +std = [ + "serde/std", + "serde_derive", + "parity-codec/std", + "parity-codec-derive/std", + "substrate-primitives/std", + "sr-std/std", + "sr-io/std", + "sr-primitives/std", + "srml-support/std", + "srml-system/std", +] diff --git a/substrate/srml/assets/src/lib.rs b/substrate/srml/assets/src/lib.rs new file mode 100644 index 0000000000..61b36dc3c1 --- /dev/null +++ b/substrate/srml/assets/src/lib.rs @@ -0,0 +1,223 @@ +// Copyright 2017-2018 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! A simple, secure module for dealing with fungible assets. + +// Ensure we're `no_std` when compiling for Wasm. +#![cfg_attr(not(feature = "std"), no_std)] + +// Assert macros used in tests. +extern crate sr_std; + +// Needed for tests (`with_externalities`). +#[cfg(test)] +extern crate sr_io as runtime_io; + +// Needed for the set of mock primitives used in our tests. +#[cfg(test)] +extern crate substrate_primitives; + +// Needed for deriving `Serialize` and `Deserialize` for various types. +// We only implement the serde traits for std builds - they're unneeded +// in the wasm runtime. 
+#[cfg(feature = "std")] +#[macro_use] +extern crate serde_derive; + +// Needed for deriving `Encode` and `Decode` for `RawEvent`. +#[macro_use] +extern crate parity_codec_derive; +extern crate parity_codec as codec; + +// Needed for type-safe access to storage DB. +#[macro_use] +extern crate srml_support as runtime_support; + +// Needed for various traits. In our case, `OnFinalise`. +extern crate sr_primitives as primitives; +// `system` module provides us with all sorts of useful stuff and macros +// depend on it being around. +extern crate srml_system as system; + +use primitives::traits::OnFinalise; +use runtime_support::{StorageValue, StorageMap, dispatch::Result, Parameter}; +use primitives::traits::{Member, SimpleArithmetic, Zero}; +use system::ensure_signed; + +pub trait Trait: system::Trait { + /// The overarching event type. + type Event: From> + Into<::Event>; + + /// The units in which we record balances. + type Balance: Member + Parameter + SimpleArithmetic + Default + Copy; +} + +type AssetId = u32; + +decl_module! { + // Simple declaration of the `Module` type. Lets the macro know what its working on. + pub struct Module for enum Call where origin: T::Origin { + /// Issue a new class of fungible assets. There are, and will only ever be, `total` + /// such assets and they'll all belong to the `origin` initially. It will have an + /// identifier `AssetId` instance: this will be specified in the `Issued` event. + fn issue(origin, total: T::Balance) -> Result; + + /// Move some assets from one holder to another. + fn transfer(origin, id: AssetId, target: T::AccountId, total: T::Balance) -> Result; + + /// Destroy any assets of `id` owned by `origin`. + fn destroy(origin, id: AssetId) -> Result; + } +} + +/// An event in this module. Events are simple means of reporting specific conditions and +/// circumstances that have happened that users, Dapps and/or chain explorers would find +/// interesting and otherwise difficult to detect. 
+decl_event!( + pub enum Event where ::AccountId, ::Balance { + /// Some assets were issued. + Issued(AssetId, AccountId, Balance), + /// Some assets were transfered. + Transfered(AssetId, AccountId, AccountId, Balance), + /// Some assets were destroyed. + Destroyed(AssetId, AccountId, Balance), + } +); + +decl_storage! { + trait Store for Module as Assets { + /// The number of units of assets held by any given account. + Balances: map (AssetId, T::AccountId) => T::Balance; + /// The next asset identifier up for grabs. + NextAssetId get(next_asset_id): AssetId; + } +} + +// The main implementation block for the module. +impl Module { + /// Deposit one of this module's events. + // TODO: move into `decl_module` macro. + fn deposit_event(event: Event) { + >::deposit_event(::Event::from(event).into()); + } + + // Public immutables + + /// Get the asset `id` balance of `who`. + pub fn balance(id: AssetId, who: T::AccountId) -> T::Balance { + >::get((id, who)) + } + + // Implement Calls and add public immutables and private mutables. 
+ + fn issue(origin: T::Origin, total: T::Balance) -> Result { + let origin = ensure_signed(origin)?; + + let id = Self::next_asset_id(); + >::mutate(|id| *id += 1); + + + >::insert((id, origin.clone()), total); + + Self::deposit_event(RawEvent::Issued(id, origin, total)); + Ok(()) + } + + fn transfer(origin: T::Origin, id: AssetId, target: T::AccountId, amount: T::Balance) -> Result { + let origin = ensure_signed(origin)?; + let origin_account = (id, origin.clone()); + let origin_balance = >::get(&origin_account); + ensure!(origin_balance >= amount, "origin account balance must be greater than amount"); + + Self::deposit_event(RawEvent::Transfered(id, origin, target.clone(), amount)); + >::insert(origin_account, origin_balance - amount); + >::mutate((id, target), |balance| *balance += amount); + + Ok(()) + } + + fn destroy(origin: T::Origin, id: AssetId) -> Result { + let origin = ensure_signed(origin)?; + + let balance = >::take((id, origin.clone())); + ensure!(!balance.is_zero(), "origin balance should be non-zero"); + + Self::deposit_event(RawEvent::Destroyed(id, origin, balance)); + + Ok(()) + } +} + +// This trait expresses what should happen when the block is finalised. +impl OnFinalise for Module {} + +#[cfg(test)] +mod tests { + use super::*; + + use runtime_io::with_externalities; + use substrate_primitives::{H256, Blake2Hasher}; + // The testing primitives are very useful for avoiding having to work with signatures + // or public keys. `u64` is used as the `AccountId` and no `Signature`s are requried. + use primitives::{BuildStorage, traits::{BlakeTwo256}, testing::{Digest, DigestItem, Header}}; + + impl_outer_origin! { + pub enum Origin for Test {} + } + + // For testing the module, we construct most of a mock runtime. This means + // first constructing a configuration type (`Test`) which `impl`s each of the + // configuration traits of modules we want to use. 
+ #[derive(Clone, Eq, PartialEq)] + pub struct Test; + impl system::Trait for Test { + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type Digest = Digest; + type AccountId = u64; + type Header = Header; + type Event = (); + type Log = DigestItem; + } + impl Trait for Test { + type Event = (); + type Balance = u64; + } + type Assets = Module; + + // This function basically just builds a genesis storage key/value store according to + // our desired mockup. + fn new_test_ext() -> runtime_io::TestExternalities { + system::GenesisConfig::::default().build_storage().unwrap().into() + } + + #[test] + fn it_works() { + with_externalities(&mut new_test_ext(), || { + assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + assert_eq!(Assets::balance(0, 1), 50); + assert_eq!(Assets::balance(0, 2), 50); + assert_ok!(Assets::destroy(Origin::signed(2), 0)); + assert_eq!(Assets::balance(0, 2), 0); + assert_noop!(Assets::transfer(Origin::signed(2), 0, 1, 50), "origin account balance must be greater than amount"); + }); + } +} diff --git a/substrate/srml/consensus/src/lib.rs b/substrate/srml/consensus/src/lib.rs index b1d0905e9f..835d39a44c 100644 --- a/substrate/srml/consensus/src/lib.rs +++ b/substrate/srml/consensus/src/lib.rs @@ -41,11 +41,15 @@ extern crate substrate_primitives; extern crate sr_io as runtime_io; use rstd::prelude::*; +use rstd::result; use runtime_support::{storage, Parameter}; use runtime_support::dispatch::Result; use runtime_support::storage::StorageValue; use runtime_support::storage::unhashed::StorageVec; -use primitives::traits::{MaybeSerializeDebug, OnFinalise, Member}; +use primitives::RuntimeString; +use primitives::traits::{ + MaybeSerializeDebug, OnFinalise, Member, ProvideInherent, Block as BlockT +}; use substrate_primitives::storage::well_known_keys; use 
system::{ensure_signed, ensure_inherent}; @@ -235,6 +239,35 @@ impl Module { } } +impl ProvideInherent for Module { + type Inherent = Vec; + type Call = Call; + type Error = RuntimeString; + + fn create_inherent_extrinsics(data: Self::Inherent) -> Vec<(u32, Self::Call)> { + vec![(T::NOTE_OFFLINE_POSITION, Call::note_offline(data))] + } + + fn check_inherent Option<&Self::Call>>( + block: &Block, data: Self::Inherent, extract_function: &F + ) -> result::Result<(), Self::Error> { + let noted_offline = block + .extrinsics().get(T::NOTE_OFFLINE_POSITION as usize) + .and_then(|xt| match extract_function(&xt) { + Some(Call::note_offline(ref x)) => Some(&x[..]), + _ => None, + }).unwrap_or(&[]); + + noted_offline.iter().try_for_each(|n| + if !data.contains(n) { + Err("Online node marked offline".into()) + } else { + Ok(()) + } + ) + } +} + /// Finalization hook for the consensus module. impl OnFinalise for Module { fn on_finalise(_n: T::BlockNumber) { @@ -246,4 +279,3 @@ impl OnFinalise for Module { } } } - diff --git a/substrate/srml/example/Cargo.toml b/substrate/srml/example/Cargo.toml index ae2174ca23..34d8d058d1 100644 --- a/substrate/srml/example/Cargo.toml +++ b/substrate/srml/example/Cargo.toml @@ -20,15 +20,15 @@ srml-balances = { path = "../balances", default-features = false } [features] default = ["std"] std = [ - "sr-std/std", - "sr-io/std", - "srml-support/std", - "sr-primitives/std", - "srml-balances/std", "serde/std", "serde_derive", "parity-codec/std", "parity-codec-derive/std", + "sr-std/std", + "sr-io/std", + "sr-primitives/std", "substrate-primitives/std", + "srml-support/std", "srml-system/std", + "srml-balances/std", ] diff --git a/substrate/srml/example/src/lib.rs b/substrate/srml/example/src/lib.rs index 8952893540..f60168d37b 100644 --- a/substrate/srml/example/src/lib.rs +++ b/substrate/srml/example/src/lib.rs @@ -25,12 +25,15 @@ extern crate sr_std; // Needed for tests (`with_externalities`). 
#[cfg(test)] -extern crate sr_io as runtime_io; +extern crate sr_io; // Needed for the set of mock primitives used in our tests. #[cfg(test)] extern crate substrate_primitives; +// Needed for various traits. In our case, `OnFinalise`. +extern crate sr_primitives; + // Needed for deriving `Serialize` and `Deserialize` for various types. // We only implement the serde traits for std builds - they're unneeded // in the wasm runtime. @@ -45,10 +48,7 @@ extern crate parity_codec as codec; // Needed for type-safe access to storage DB. #[macro_use] -extern crate srml_support as runtime_support; - -// Needed for various traits. In our case, `OnFinalise`. -extern crate sr_primitives as runtime_primitives; +extern crate srml_support as support; // `system` module provides us with all sorts of useful stuff and macros // depend on it being around. extern crate srml_system as system; @@ -57,8 +57,8 @@ extern crate srml_system as system; // might find it useful). extern crate srml_balances as balances; -use runtime_primitives::traits::OnFinalise; -use runtime_support::{StorageValue, dispatch::Result}; +use sr_primitives::traits::OnFinalise; +use support::{StorageValue, dispatch::Result}; use system::ensure_signed; /// Our module's configuration trait. All our types and consts go in here. If the @@ -147,8 +147,8 @@ decl_storage! { // e.g. pub Bar get(bar): map T::AccountId => Vec<(T::Balance, u64)>; // // For basic value items, you'll get a type which implements - // `runtime_support::StorageValue`. For map items, you'll get a type which - // implements `runtime_support::StorageMap`. + // `support::StorageValue`. For map items, you'll get a type which + // implements `support::StorageMap`. 
// // If they have a getter (`get(getter_name)`), then your module will come // equipped with `fn getter_name() -> Type` for basic value items or @@ -241,6 +241,7 @@ impl Module { Ok(()) } + #[allow(dead_code)] fn accumulate_foo(origin: T::Origin, increase_by: T::Balance) -> Result { let _sender = ensure_signed(origin)?; @@ -278,15 +279,11 @@ impl OnFinalise for Module { mod tests { use super::*; - use runtime_io::with_externalities; + use sr_io::with_externalities; use substrate_primitives::{H256, Blake2Hasher}; - use runtime_primitives::BuildStorage; - use runtime_primitives::traits::{BlakeTwo256}; - use runtime_primitives::testing::DigestItem; - // The testing primitives are very useful for avoiding having to work with signatures // or public keys. `u64` is used as the `AccountId` and no `Signature`s are requried. - use runtime_primitives::testing::{Digest, Header}; + use sr_primitives::{BuildStorage, traits::{BlakeTwo256}, testing::{Digest, DigestItem, Header}}; impl_outer_origin! { pub enum Origin for Test {} @@ -323,7 +320,7 @@ mod tests { // This function basically just builds a genesis storage key/value store according to // our desired mockup. - fn new_test_ext() -> runtime_io::TestExternalities { + fn new_test_ext() -> sr_io::TestExternalities { let mut t = system::GenesisConfig::::default().build_storage().unwrap(); // We use default for brevity, but you can configure as desired if needed. 
t.extend(balances::GenesisConfig::::default().build_storage().unwrap()); diff --git a/substrate/srml/executive/src/lib.rs b/substrate/srml/executive/src/lib.rs index 49daf95fa5..77ef9655d0 100644 --- a/substrate/srml/executive/src/lib.rs +++ b/substrate/srml/executive/src/lib.rs @@ -260,12 +260,12 @@ impl< expected_index = expected_index + One::one(); } - TransactionValidity::Valid( - /*priority: */encoded_len as TransactionPriority, - /*requires: */deps, - /*provides: */vec![(sender, *index).encode()], - /*longevity: */TransactionLongevity::max_value(), - ) + TransactionValidity::Valid { + priority: encoded_len as TransactionPriority, + requires: deps, + provides: vec![(sender, *index).encode()], + longevity: TransactionLongevity::max_value(), + } } else { return TransactionValidity::Invalid } diff --git a/substrate/srml/support/src/inherent.rs b/substrate/srml/support/src/inherent.rs new file mode 100644 index 0000000000..f87a41806a --- /dev/null +++ b/substrate/srml/support/src/inherent.rs @@ -0,0 +1,128 @@ +// Copyright 2018 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +#[doc(hidden)] +pub use rstd::{result::Result, vec::Vec}; +#[doc(hidden)] +pub use runtime_primitives::traits::ProvideInherent; + + +/// Implement the outer inherent. +/// All given modules need to implement `ProvideInherent`. 
+/// +/// # Example +/// +/// ```nocompile +/// impl_outer_inherent! { +/// pub struct InherentData where Block = Block, UncheckedExtrinsic = UncheckedExtrinsic { +/// timestamp: Timestamp export Error as TimestampInherentError, +/// consensus: Consensus, +/// } +/// } +/// ``` +/// +/// Additional parameters after `UncheckedExtrinsic` are `Error` and `Call`. +#[macro_export] +macro_rules! impl_outer_inherent { + ( + $(#[$attr:meta])* + pub struct $name:ident where Block = $block:ident, UncheckedExtrinsic = $unchecked:ident { + $( $module:ident: $module_ty:ident $(export Error as $error_name:ident)*, )* + } + ) => { + impl_outer_inherent!( + $( #[$attr] )* + pub struct $name where Block = $block, UncheckedExtrinsic = $unchecked, Error = InherentError, Call = Call { + $( $module: $module_ty $(export Error as $error_name)*, )* + } + ); + }; + ( + $(#[$attr:meta])* + pub struct $name:ident where Block = $block:ident, UncheckedExtrinsic = $unchecked:ident, Error = $error:ident { + $( $module:ident: $module_ty:ident $(export Error as $error_name:ident)*, )* + } + ) => { + impl_outer_inherent!( + $( #[$attr] )* + pub struct $name where Block = $block, UncheckedExtrinsic = $unchecked, Error = $error, Call = Call { + $( $module: $module_ty $(export Error as $error_name)*, )* + } + ); + }; + ( + $(#[$attr:meta])* + pub struct $name:ident where Block = $block:ident, UncheckedExtrinsic = $unchecked:ident, Error = $error:ident, Call = $call:ident { + $( $module:ident: $module_ty:ident $(export Error as $error_name:ident)*, )* + } + ) => { + $( #[$attr] )* + // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. + #[derive(Encode, Decode)] + /// Inherent data to include in a block. + pub struct $name { + $( $module: <$module_ty as $crate::inherent::ProvideInherent>::Inherent, )* + } + + $( + $( + pub type $error_name =<$module_ty as $crate::inherent::ProvideInherent>::Error; + )* + )* + + impl $name { + /// Create a new instance. 
+ pub fn new( $( $module: <$module_ty as $crate::inherent::ProvideInherent>::Inherent ),* ) -> Self { + Self { + $( $module, )* + } + } + + fn create_inherent_extrinsics(self) -> Vec<$unchecked> { + let mut inherent = $crate::inherent::Vec::new(); + + $( + inherent.extend( + <$module_ty as $crate::inherent::ProvideInherent>::create_inherent_extrinsics(self.$module) + .into_iter() + .map(|v| (v.0, $unchecked::new_unsigned($call::$module_ty(v.1)))) + ); + )* + + inherent.as_mut_slice().sort_unstable_by_key(|v| v.0); + inherent.into_iter().map(|v| v.1).collect() + } + + fn check_inherents(self, block: $block) -> $crate::inherent::Result<(), $error> { + $( + <$module_ty as $crate::inherent::ProvideInherent>::check_inherent( + &block, self.$module, &|xt| match xt.function { + Call::$module_ty(ref data) => Some(data), + _ => None, + }).map_err($error::$module_ty)?; + )* + Ok(()) + } + } + + // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. + #[derive(Encode)] + #[cfg_attr(feature = "std", derive(Decode))] + pub enum $error { + $( $module_ty(<$module_ty as $crate::inherent::ProvideInherent>::Error), )* + } + }; +} diff --git a/substrate/srml/support/src/lib.rs b/substrate/srml/support/src/lib.rs index eba740ab7a..02747b199a 100644 --- a/substrate/srml/support/src/lib.rs +++ b/substrate/srml/support/src/lib.rs @@ -21,15 +21,12 @@ #![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(not(feature = "std"), feature(alloc))] -#[cfg(not(feature = "std"))] -extern crate alloc; - #[cfg(feature = "std")] extern crate serde; extern crate sr_std as rstd; extern crate sr_io as runtime_io; -#[cfg(feature = "std")] +#[doc(hidden)] pub extern crate sr_primitives as runtime_primitives; extern crate substrate_metadata; @@ -49,12 +46,6 @@ extern crate parity_codec_derive; pub extern crate parity_codec as codec; pub use self::storage::generator::Storage as GenericStorage; -#[cfg(feature = "std")] -pub mod alloc { - pub use std::boxed; - pub use 
std::vec; -} - #[macro_use] pub mod dispatch; #[macro_use] @@ -68,6 +59,8 @@ mod origin; pub mod metadata; #[macro_use] mod runtime; +#[macro_use] +pub mod inherent; pub use self::storage::{StorageVec, StorageList, StorageValue, StorageMap}; pub use self::hashable::Hashable; diff --git a/substrate/srml/support/src/runtime.rs b/substrate/srml/support/src/runtime.rs index df8fa4c4d4..3e42e1970e 100644 --- a/substrate/srml/support/src/runtime.rs +++ b/substrate/srml/support/src/runtime.rs @@ -42,17 +42,21 @@ /// - `Storage` /// - `Event` or `Event` (if the event is generic) /// - `Origin` or `Origin` (if the origin is generic) -/// - `Config` +/// - `Config` or `Config` (if the config is generic) /// - `Log( $(IDENT),* )` #[macro_export] macro_rules! construct_runtime { ( - pub enum $runtime:ident with Log ($log_internal:ident: DigestItem<$( $log_genarg:ty ),+>) { + pub enum $runtime:ident with Log ($log_internal:ident: DigestItem<$( $log_genarg:ty ),+>) + where Block = $block:ident, UncheckedExtrinsic = $unchecked:ident + { $( $rest:tt )* } ) => { construct_runtime!( $runtime; + $block; + $unchecked; $log_internal < $( $log_genarg ),* >; ; $( $rest )* @@ -60,6 +64,8 @@ macro_rules! construct_runtime { }; ( $runtime:ident; + $block:ident; + $unchecked:ident; $log_internal:ident <$( $log_genarg:ty ),+>; $( $expanded_name:ident: $expanded_module:ident::{ @@ -85,6 +91,8 @@ macro_rules! construct_runtime { ) => { construct_runtime!( $runtime; + $block; + $unchecked; $log_internal < $( $log_genarg ),* >; $( $expanded_name: $expanded_module::{ @@ -94,7 +102,7 @@ macro_rules! construct_runtime { $( ( $( $expanded_modules_args ),* ) )* ),* }, - )* $name: $module::{Module, Call, Storage, Event, Config}; + )* $name: $module::{Module, Call, Storage, Event, Config}; $( $rest_name: $rest_module $( ::{ @@ -110,6 +118,8 @@ macro_rules! 
construct_runtime { }; ( $runtime:ident; + $block:ident; + $unchecked:ident; $log_internal:ident <$( $log_genarg:ty ),+>; $( $expanded_name:ident: $expanded_module:ident::{ @@ -142,6 +152,8 @@ macro_rules! construct_runtime { ) => { construct_runtime!( $runtime; + $block; + $unchecked; $log_internal < $( $log_genarg ),* >; $( $expanded_name: $expanded_module::{ @@ -153,7 +165,7 @@ macro_rules! construct_runtime { }, )* $name: $module::{ - Module, Call, Storage, Event, Config, + Module, Call, Storage, Event, Config, $( $modules $( <$modules_generic> )* $( ( $( $modules_args ),* ) )* ),* @@ -173,6 +185,8 @@ macro_rules! construct_runtime { }; ( $runtime:ident; + $block:ident; + $unchecked:ident; $log_internal:ident <$( $log_genarg:ty ),+>; $( $expanded_name:ident: $expanded_module:ident::{ @@ -204,6 +218,8 @@ macro_rules! construct_runtime { ) => { construct_runtime!( $runtime; + $block; + $unchecked; $log_internal < $( $log_genarg ),* >; $( $expanded_name: $expanded_module::{ @@ -234,6 +250,8 @@ macro_rules! construct_runtime { }; ( $runtime:ident; + $block:ident; + $unchecked:ident; $log_internal:ident <$( $log_genarg:ty ),+>; $( $name:ident: $module:ident::{ @@ -245,6 +263,13 @@ macro_rules! construct_runtime { } ),*; ) => { + mashup! { + $( + substrate_generate_ident_name["config-ident" $name] = $name Config; + substrate_generate_ident_name["inherent-error-ident" $name] = $name InherentError; + )* + } + #[derive(Clone, Copy, PartialEq, Eq)] #[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] pub struct $runtime; @@ -298,6 +323,15 @@ macro_rules! construct_runtime { $name: $module::{ $( $modules $( <$modules_generic> )* ),* } ),*; ); + __decl_outer_inherent!( + $runtime; + $block; + $unchecked; + ; + $( + $name: $module::{ $( $modules $( <$modules_generic> )* ),* } + ),*; + ); } } @@ -918,7 +952,7 @@ macro_rules! __decl_outer_log { macro_rules! 
__decl_outer_config { ( $runtime:ident; - $( $parsed_modules:ident :: $parsed_name:ident ),*; + $( $parsed_modules:ident :: $parsed_name:ident $( < $parsed_generic:ident > )* ),*; $name:ident: $module:ident::{ Config $(, $modules:ident $( <$modules_generic:ident> )* )* } @@ -928,6 +962,109 @@ macro_rules! __decl_outer_config { ) => { __decl_outer_config!( $runtime; + $( $parsed_modules :: $parsed_name $( < $parsed_generic > )*, )* $module::$name; + $( + $rest_name: $rest_module::{ + $( $rest_modules $( <$rest_modules_generic> )* ),* + } + ),*; + ); + }; + ( + $runtime:ident; + $( $parsed_modules:ident :: $parsed_name:ident $( < $parsed_generic:ident > )* ),*; + $name:ident: $module:ident::{ + Config $(, $modules:ident $( <$modules_generic:ident> )* )* + } + $(, $rest_name:ident : $rest_module:ident::{ + $( $rest_modules:ident $( <$rest_modules_generic:ident> )* ),* + })*; + ) => { + __decl_outer_config!( + $runtime; + $( $parsed_modules :: $parsed_name $( < $parsed_generic > )*, )* $module::$name; + $( + $rest_name: $rest_module::{ + $( $rest_modules $( <$rest_modules_generic> )* ),* + } + ),*; + ); + }; + ( + $runtime:ident; + $( $parsed_modules:ident :: $parsed_name:ident $( < $parsed_generic:ident > )* ),*; + $name:ident: $module:ident::{ + $ingore:ident $( <$ignor:ident> )* $(, $modules:ident $( <$modules_generic:ident> )* )* + } + $(, $rest_name:ident : $rest_module:ident::{ + $( $rest_modules:ident $( <$rest_modules_generic:ident> )* ),* + })*; + ) => { + __decl_outer_config!( + $runtime; + $( $parsed_modules :: $parsed_name $( < $parsed_generic > )*),*; + $name: $module::{ $( $modules $( <$modules_generic> )* ),* } + $( + , $rest_name: $rest_module::{ + $( $rest_modules $( <$rest_modules_generic> )* ),* + } + )*; + ); + }; + ( + $runtime:ident; + $( $parsed_modules:ident :: $parsed_name:ident $( < $parsed_generic:ident > )* ),*; + $name:ident: $module:ident::{} + $(, $rest_name:ident : $rest_module:ident::{ + $( $rest_modules:ident $( 
<$rest_modules_generic:ident> )* ),* + })*; + ) => { + __decl_outer_config!( + $runtime; + $( $parsed_modules :: $parsed_name $( < $parsed_generic > )*),*; + $( + $rest_name: $rest_module::{ + $( $rest_modules $( <$rest_modules_generic> )* ),* + } + ),*; + ); + }; + ( + $runtime:ident; + $( $parsed_modules:ident :: $parsed_name:ident $( < $parsed_generic:ident > )* ),*; + ; + ) => { + substrate_generate_ident_name! { + impl_outer_config!( + pub struct GenesisConfig for $runtime { + $( + "config-ident" $parsed_name => $parsed_modules $( < $parsed_generic > )*, + )* + } + ); + } + }; +} + +#[macro_export] +#[doc(hidden)] +macro_rules! __decl_outer_inherent { + ( + $runtime:ident; + $block:ident; + $unchecked:ident; + $( $parsed_modules:ident :: $parsed_name:ident ),*; + $name:ident: $module:ident::{ + Inherent $(, $modules:ident $( <$modules_generic:ident> )* )* + } + $(, $rest_name:ident : $rest_module:ident::{ + $( $rest_modules:ident $( <$rest_modules_generic:ident> )* ),* + })*; + ) => { + __decl_outer_inherent!( + $runtime; + $block; + $unchecked; $( $parsed_modules :: $parsed_name, )* $module::$name; $( $rest_name: $rest_module::{ @@ -938,6 +1075,8 @@ macro_rules! __decl_outer_config { }; ( $runtime:ident; + $block:ident; + $unchecked:ident; $( $parsed_modules:ident :: $parsed_name:ident ),*; $name:ident: $module:ident::{ $ingore:ident $( <$ignor:ident> )* $(, $modules:ident $( <$modules_generic:ident> )* )* @@ -946,8 +1085,10 @@ macro_rules! __decl_outer_config { $( $rest_modules:ident $( <$rest_modules_generic:ident> )* ),* })*; ) => { - __decl_outer_config!( + __decl_outer_inherent!( $runtime; + $block; + $unchecked; $( $parsed_modules :: $parsed_name ),*; $name: $module::{ $( $modules $( <$modules_generic> )* ),* } $( @@ -959,14 +1100,18 @@ macro_rules! 
__decl_outer_config { }; ( $runtime:ident; + $block:ident; + $unchecked:ident; $( $parsed_modules:ident :: $parsed_name:ident ),*; $name:ident: $module:ident::{} $(, $rest_name:ident : $rest_module:ident::{ $( $rest_modules:ident $( <$rest_modules_generic:ident> )* ),* })*; ) => { - __decl_outer_config!( + __decl_outer_inherent!( $runtime; + $block; + $unchecked; $( $parsed_modules :: $parsed_name ),*; $( $rest_name: $rest_module::{ @@ -977,20 +1122,16 @@ macro_rules! __decl_outer_config { }; ( $runtime:ident; + $block:ident; + $unchecked:ident; $( $parsed_modules:ident :: $parsed_name:ident ),*; ; ) => { - mashup! { - $( - substrate_generate_config_name["config-name" $parsed_name] = $parsed_name Config; - )* - } - - substrate_generate_config_name! { - impl_outer_config!( - pub struct GenesisConfig for $runtime { + substrate_generate_ident_name! { + impl_outer_inherent!( + pub struct InherentData where Block = $block, UncheckedExtrinsic = $unchecked { $( - "config-name" $parsed_name => $parsed_modules, + $parsed_modules: $parsed_name export Error as "inherent-error-ident" $parsed_name, )* } ); diff --git a/substrate/srml/support/src/storage/generator.rs b/substrate/srml/support/src/storage/generator.rs index 2c6c96e600..8398819ebe 100644 --- a/substrate/srml/support/src/storage/generator.rs +++ b/substrate/srml/support/src/storage/generator.rs @@ -590,6 +590,21 @@ macro_rules! __generate_genesis_config { ); }; + // Do not generate any `GenesisConfig`, if we not require it. 
+ (@GEN + [$traittype:ident $traitinstance:ident] + // normal getters + [] + // for normal builders + [$( $normalclassname:ident ($normalbuild:expr) ;)*] + // for map builders + [$( $mapclassname:ident ($mapbuild:expr) ;)*] + // extra genesis fields + [] + // final build storage call + [$call:expr] + ) => {}; + (@GEN [$traittype:ident $traitinstance:ident] // normal getters diff --git a/substrate/srml/timestamp/Cargo.toml b/substrate/srml/timestamp/Cargo.toml index 3b06f6c39a..c07a62aeda 100644 --- a/substrate/srml/timestamp/Cargo.toml +++ b/substrate/srml/timestamp/Cargo.toml @@ -7,6 +7,7 @@ authors = ["Parity Technologies "] hex-literal = "0.1.0" serde = { version = "1.0", default-features = false } serde_derive = { version = "1.0", optional = true } +parity-codec-derive = { version = "2.1", default-features = false } parity-codec = { version = "2.1", default-features = false } substrate-primitives = { path = "../../core/primitives", default-features = false } sr-std = { path = "../../core/sr-std", default-features = false } @@ -29,6 +30,7 @@ std = [ "srml-consensus/std", "serde/std", "serde_derive", + "parity-codec-derive/std", "parity-codec/std", "substrate-primitives/std", "srml-system/std", diff --git a/substrate/srml/timestamp/src/lib.rs b/substrate/srml/timestamp/src/lib.rs index 6d64800189..c13972c7da 100644 --- a/substrate/srml/timestamp/src/lib.rs +++ b/substrate/srml/timestamp/src/lib.rs @@ -33,6 +33,7 @@ #![cfg_attr(not(feature = "std"), no_std)] +#[cfg_attr(not(feature = "std"), macro_use)] extern crate sr_std as rstd; #[macro_use] @@ -50,14 +51,18 @@ extern crate sr_primitives as runtime_primitives; extern crate srml_system as system; extern crate srml_consensus as consensus; extern crate parity_codec as codec; +#[macro_use] +extern crate parity_codec_derive; use codec::HasCompact; use runtime_support::{StorageValue, Parameter}; use runtime_support::dispatch::Result; -use runtime_primitives::traits::{As, OnFinalise, SimpleArithmetic, Zero}; +use 
runtime_primitives::RuntimeString; +use runtime_primitives::traits::{ + As, OnFinalise, SimpleArithmetic, Zero, ProvideInherent, Block as BlockT, Extrinsic +}; use system::ensure_inherent; -use rstd::ops::{Mul, Div}; - +use rstd::{result, ops::{Mul, Div}, vec::Vec}; pub trait Trait: consensus::Trait + system::Trait { /// The position of the required timestamp-set extrinsic. @@ -106,7 +111,7 @@ impl Module { fn set(origin: T::Origin, now: ::Type) -> Result { ensure_inherent(origin)?; let now = now.into(); - + assert!(!::DidUpdate::exists(), "Timestamp must be updated only once in the block"); assert!( >::extrinsic_index() == Some(T::TIMESTAMP_SET_POSITION), @@ -123,12 +128,49 @@ impl Module { } /// Set the timestamp to something in particular. Only used for tests. - #[cfg(any(feature = "std", test))] + #[cfg(feature = "std")] pub fn set_timestamp(now: T::Moment) { ::Now::put(now); } } +#[derive(Encode)] +#[cfg_attr(feature = "std", derive(Decode))] +pub enum InherentError { + Other(RuntimeString), + TimestampInFuture(u64), +} + +impl ProvideInherent for Module { + type Inherent = T::Moment; + type Call = Call; + type Error = InherentError; + + fn create_inherent_extrinsics(data: Self::Inherent) -> Vec<(u32, Self::Call)> { + vec![(T::TIMESTAMP_SET_POSITION, Call::set(data.into()))] + } + + fn check_inherent Option<&Self::Call>>( + block: &Block, data: Self::Inherent, extract_function: &F + ) -> result::Result<(), Self::Error> { + const MAX_TIMESTAMP_DRIFT: u64 = 60; + + let xt = block.extrinsics().get(T::TIMESTAMP_SET_POSITION as usize) + .ok_or_else(|| InherentError::Other("No valid timestamp inherent in block".into()))?; + + let t = match (xt.is_signed(), extract_function(&xt)) { + (Some(false), Some(Call::set(ref t))) => t.clone(), + _ => return Err(InherentError::Other("No valid timestamp inherent in block".into())), + }.into().as_(); + + if t > data.as_() + MAX_TIMESTAMP_DRIFT { + Err(InherentError::TimestampInFuture(t)) + } else { + Ok(()) + } + } +} + impl 
OnFinalise for Module { fn on_finalise(_n: T::BlockNumber) { assert!(::DidUpdate::take(), "Timestamp must be updated once in the block"); diff --git a/substrate/srml/treasury/src/lib.rs b/substrate/srml/treasury/src/lib.rs index ae8aa9ceec..febe5fed89 100644 --- a/substrate/srml/treasury/src/lib.rs +++ b/substrate/srml/treasury/src/lib.rs @@ -85,7 +85,7 @@ decl_module! { fn configure(proposal_bond: Permill, proposal_bond_minimum: ::Type, spend_period: ::Type, burn: Permill) -> Result; // Reject a proposed spend. The original deposit will be slashed. - fn reject_proposal(origin, roposal_id: Compact) -> Result; + fn reject_proposal(origin, proposal_id: Compact) -> Result; // Approve a proposal. At a later time, the proposal will be allocated to the beneficiary // and the original deposit will be returned. diff --git a/substrate/subkey/src/main.rs b/substrate/subkey/src/main.rs index afe78f2a0b..a89f7911ac 100644 --- a/substrate/subkey/src/main.rs +++ b/substrate/subkey/src/main.rs @@ -34,17 +34,12 @@ fn main() { match matches.subcommand() { ("vanity", Some(matches)) => { let desired: String = matches.value_of("pattern").map(str::to_string).unwrap_or_default(); - let amount_of_keys = matches.value_of("number") - .expect("`number` has a default value; thus it can't be None; qed"); - let amount_of_keys: usize = amount_of_keys.parse::().expect("Failed to parse number"); - - let keys = vanity::generate_key(&desired, amount_of_keys, true).expect("Key generation failed"); - for key in keys { - println!("{} - {} ({}%)", - key.pair.public().to_ss58check(), - HexDisplay::from(&key.seed), - key.score); - } + let key = vanity::generate_key(&desired).expect("Key generation failed"); + println!("Seed {} (hex: 0x{}) - {} ({}%)", + key.pair.public().to_ss58check(), + HexDisplay::from(&key.pair.public().0), + HexDisplay::from(&key.seed), + key.score); } ("restore", Some(matches)) => { let mut raw_seed = matches.value_of("seed") @@ -63,7 +58,11 @@ fn main() { 
seed[..len].copy_from_slice(&raw_seed[..len]); let pair = Pair::from_seed(&seed); - println!("{}: {}", HexDisplay::from(&seed), pair.public().to_ss58check()); + println!("Seed 0x{} is account:\n SS58: {}\n Hex: 0x{}", + HexDisplay::from(&seed), + pair.public().to_ss58check(), + HexDisplay::from(&pair.public().0) + ); }, _ => print_usage(&matches), } diff --git a/substrate/subkey/src/vanity.rs b/substrate/subkey/src/vanity.rs index a3c0dab37d..fea1066e2d 100644 --- a/substrate/subkey/src/vanity.rs +++ b/substrate/subkey/src/vanity.rs @@ -16,7 +16,6 @@ use rand::{OsRng, Rng}; use substrate_primitives::ed25519::Pair; -use std::cmp; fn good_waypoint(done: u64) -> u64 { match done { @@ -52,29 +51,25 @@ fn calculate_score(_desired: &str, key: &str) -> usize { let snip_size = _desired.len() - truncate; let truncated = &_desired[0..snip_size]; if let Some(pos) = key.find(truncated) { - let score = cmp::min(100, (51 - pos) + (snip_size * 50 / _desired.len())); - return score; + return (47 - pos) + (snip_size * 48); } } 0 } -pub fn generate_key(_desired: &str, _amount: usize, paranoiac: bool) -> Result, &str> { - println!("Generating {} keys with pattern '{}'", _amount, &_desired); +pub fn generate_key(_desired: &str) -> Result { + println!("Generating key containing pattern '{}'", _desired); - let top = 30 + (_desired.len() * 32); + let top = 45 + (_desired.len() * 48); let mut best = 0; let mut seed = [0u8; 32]; let mut done = 0; - let mut res = vec![]; OsRng::new().unwrap().fill_bytes(&mut seed[..]); loop { - if res.len() >= _amount { break; } - - // reset to a new random seed at beginning and regularly after for paranoia. 
- if paranoiac || done % 100000 == 0 { + // reset to a new random seed at beginning and regularly thereafter + if done % 100000 == 0 { OsRng::new().unwrap().fill_bytes(&mut seed[..]); } @@ -88,22 +83,18 @@ pub fn generate_key(_desired: &str, _amount: usize, paranoiac: bool) -> Result= top { println!("best: {} == top: {}", best, top); - break; + return Ok(keypair); } } seed = next_seed(seed); done += 1; if done % good_waypoint(done) == 0 { - println!("Stopping after {} keys searched", done); - break; + println!("{} keys searched; best is {}/{} complete", done, best, top); } } - res.sort_unstable_by(|a, b| b.score.cmp(&a.score)); - Ok(res) } #[cfg(test)] @@ -112,49 +103,39 @@ mod tests { #[cfg(feature = "bench")] use test::Bencher; - #[test] - fn test_generation_no_args() { - assert!(generate_key("",1, false).unwrap().len() == 1); - } - #[test] fn test_generation_with_single_char() { - assert!(generate_key("j", 1, false).unwrap().len() == 1); - } - - #[test] - fn test_generation_with_args() { - assert!(generate_key("polka", 2, false).unwrap().len() == 2); + assert!(generate_key("j").unwrap().pair.public().to_ss58check().contains("j")); } #[test] fn test_score_1_char_100() { let score = calculate_score("j", "5jolkadotwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim"); - assert!(score == 100, format!("Wrong score, we found {}", score)); + assert_eq!(score, 94); } #[test] fn test_score_100() { let score = calculate_score("Polkadot", "5PolkadotwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim"); - assert!( score == 100, format!("Wrong score, we found {}", score)); + assert_eq!(score, 430); } #[test] fn test_score_50_2() { // 50% for the position + 50% for the size - assert!(calculate_score("Polkadot", "5PolkXXXXwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim") == 75); + assert_eq!(calculate_score("Polkadot", "5PolkXXXXwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim"), 238); } #[test] fn test_score_0() { - assert!(calculate_score("Polkadot", "5GUWv4bLCchGUHJrzULXnh4JgXsMpTKRnjuXTY7Qo1Kh9uYK") == 0); + 
assert_eq!(calculate_score("Polkadot", "5GUWv4bLCchGUHJrzULXnh4JgXsMpTKRnjuXTY7Qo1Kh9uYK"), 0); } #[cfg(feature = "bench")] #[bench] fn bench_paranoiac(b: &mut Bencher) { b.iter(|| { - generate_key("polka", 3, true) + generate_key("polk") }); } @@ -162,7 +143,7 @@ mod tests { #[bench] fn bench_not_paranoiac(b: &mut Bencher) { b.iter(|| { - generate_key("polka", 3, false) + generate_key("polk") }); } }