Merge branch 'master' into rh-grandpa-dynamic2

This commit is contained in:
Robert Habermeier
2018-10-22 16:06:42 +02:00
87 changed files with 2757 additions and 993 deletions
+2
View File
@@ -0,0 +1,2 @@
doc
target
+2 -1
View File
@@ -32,7 +32,7 @@ variables:
when: on_success
expire_in: 1 mos
paths:
- target/release/polkadot
- target/release/substrate
.determine_version: &determine_version |
export VERSION=$(grep -m 1 "version =" Cargo.toml | awk '{print $3}' | tr -d '"' | tr -d "\n")
@@ -49,6 +49,7 @@ test:rust:stable: &test
script:
- ./scripts/init.sh
- export PATH="${CI_PROJECT_DIR}/cargo/bin/:$PATH"
- export RUST_BACKTRACE=1
- ./scripts/build.sh
- time cargo test --all --release --locked
tags:
+30 -34
View File
@@ -1594,9 +1594,19 @@ name = "node-cli"
version = "0.1.0"
dependencies = [
"exit-future 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
"hex-literal 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"node-service 0.1.0",
"node-executor 0.1.0",
"node-network 0.1.0",
"node-primitives 0.1.0",
"node-runtime 0.1.0",
"sr-primitives 0.1.0",
"substrate-cli 0.3.0",
"substrate-network 0.1.0",
"substrate-primitives 0.1.0",
"substrate-service 0.3.0",
"substrate-service-test 0.3.0",
"substrate-transaction-pool 0.1.0",
"tokio 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -1713,37 +1723,6 @@ dependencies = [
"substrate-primitives 0.1.0",
]
[[package]]
name = "node-service"
version = "0.1.0"
dependencies = [
"error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
"hex-literal 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"node-consensus 0.1.0",
"node-executor 0.1.0",
"node-network 0.1.0",
"node-primitives 0.1.0",
"node-runtime 0.1.0",
"parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
"rhododendron 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
"slog 2.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"sr-io 0.1.0",
"sr-primitives 0.1.0",
"substrate-client 0.1.0",
"substrate-keyring 0.1.0",
"substrate-network 0.1.0",
"substrate-primitives 0.1.0",
"substrate-service 0.3.0",
"substrate-service-test 0.3.0",
"substrate-telemetry 0.3.0",
"substrate-test-client 0.1.0",
"substrate-transaction-pool 0.1.0",
"tokio 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "nodrop"
version = "0.1.12"
@@ -2521,7 +2500,6 @@ name = "sr-api"
version = "0.1.0"
dependencies = [
"parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-codec-derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"sr-primitives 0.1.0",
"sr-std 0.1.0",
"sr-version 0.1.0",
@@ -2555,7 +2533,6 @@ dependencies = [
"serde_json 1.0.32 (registry+https://github.com/rust-lang/crates.io-index)",
"sr-io 0.1.0",
"sr-std 0.1.0",
"sr-version 0.1.0",
"substrate-primitives 0.1.0",
]
@@ -2587,9 +2564,27 @@ dependencies = [
"parity-codec-derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)",
"sr-primitives 0.1.0",
"sr-std 0.1.0",
]
[[package]]
name = "srml-assets"
version = "0.1.0"
dependencies = [
"hex-literal 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-codec-derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)",
"sr-io 0.1.0",
"sr-primitives 0.1.0",
"sr-std 0.1.0",
"srml-support 0.1.0",
"srml-system 0.1.0",
"substrate-primitives 0.1.0",
]
[[package]]
name = "srml-balances"
version = "0.1.0"
@@ -2806,6 +2801,7 @@ version = "0.1.0"
dependencies = [
"hex-literal 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-codec-derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)",
"sr-io 0.1.0",
+1 -1
View File
@@ -39,6 +39,7 @@ members = [
"core/transaction-pool",
"core/transaction-pool/graph",
"srml/support",
"srml/assets",
"srml/balances",
"srml/consensus",
"srml/contract",
@@ -68,7 +69,6 @@ members = [
"node/network",
"node/primitives",
"node/runtime",
"node/service",
"subkey",
]
exclude = [
+38
View File
@@ -0,0 +1,38 @@
FROM phusion/baseimage:0.10.1 as builder
LABEL maintainer "chevdor@gmail.com"
LABEL description="This is the build stage for Substrate. Here we create the binary."
ARG PROFILE=release
WORKDIR /substrate
COPY . /substrate
RUN apt-get update && \
apt-get upgrade -y && \
apt-get install -y cmake pkg-config libssl-dev git
RUN curl https://sh.rustup.rs -sSf | sh -s -- -y && \
export PATH=$PATH:$HOME/.cargo/bin && \
cargo build --$PROFILE
# ===== SECOND STAGE ======
FROM phusion/baseimage:0.10.0
LABEL maintainer "chevdor@gmail.com"
LABEL description="This is the 2nd stage: a very small image where we copy the Substrate binary."
ARG PROFILE=release
COPY --from=builder /substrate/target/$PROFILE/substrate /usr/local/bin
RUN mv /usr/share/ca* /tmp && \
rm -rf /usr/share/* && \
mv /tmp/ca-certificates /usr/share/ && \
rm -rf /usr/lib/python* && \
mkdir -p /root/.local/share/Substrate && \
ln -s /root/.local/share/Substrate /data
RUN rm -rf /usr/bin /usr/sbin
EXPOSE 30333 9933 9944
VOLUME ["/data"]
CMD ["/usr/local/bin/substrate"]
+48
View File
@@ -114,6 +114,54 @@ Inherent extrinsic knowledge is again somewhat generic, and the actual construct
- DAO runtime module
- Audit
== Trying out Substrate Node
Substrate Node is Substrate's pre-baked blockchain client. You can run a development node locally or configure a new chain and launch your own global testnet.
=== On Mac
To get going as fast as possible, there is a simple script that installs all required dependencies and puts Substrate into your path. Just open a terminal and run:
[source, shell]
----
curl https://raw.githubusercontent.com/paritytech/substrate/master/scripts/getgoing.sh -sSf | sh
----
You can start a local Substrate development chain by running `substrate --dev`.
To create your own global testnet, you'll need to make a new Substrate Node chain specification file ("chainspec").
First let's get a template chainspec that you can edit. We'll use the "staging" chain, a sort of default chain that the node comes pre-configured with:
[source, shell]
----
substrate build-spec --chain=staging > ~/chainspec.json
----
Now, edit `~/chainspec.json` in your editor. There are a lot of individual fields for each module, and one very large one which contains the WebAssembly code blob for this chain. The easiest field to edit is the block `period`. Change it to 10 (seconds):
[source, json]
----
"timestamp": {
"period": 10
},
----
[source, shell]
----
substrate build-spec --chain ~/chainspec.json --raw > ~/mychain.json
----
[source, shell]
----
substrate --chain ~/mychain.json
----
[source, shell]
----
substrate --chain ~/mychain.json --validator --key ...
----
== Building
+5 -5
View File
@@ -91,7 +91,7 @@ pub struct Fork<Block: BlockT, T> {
head: Entry<Block, T>,
}
/// Outcome of Fork::try_append_or_fork.
/// Outcome of Fork::try_append_or_fork.
#[derive(Debug)]
#[cfg_attr(test, derive(PartialEq))]
pub enum ForkAppendResult<Block: BlockT> {
@@ -356,7 +356,7 @@ impl<Block: BlockT, T: CacheItemT, S: Storage<Block, T>> ListCache<Block, T, S>
// if there's an entry at this block:
// - remove reference from this entry to the previous entry
// - destroy fork starting with previous entry
// - destroy fork starting with previous entry
let current_entry = match self.storage.read_entry(&ancient_block)? {
Some(current_entry) => current_entry,
None => return Ok(()),
@@ -583,12 +583,12 @@ fn read_forks<Block: BlockT, T: CacheItemT, S: Storage<Block, T>>(
#[cfg(test)]
pub mod tests {
use runtime_primitives::testing::{Header, Block as RawBlock};
use runtime_primitives::testing::{Header, Block as RawBlock, ExtrinsicWrapper};
use runtime_primitives::traits::Header as HeaderT;
use cache::list_storage::tests::{DummyStorage, FaultyStorage, DummyTransaction};
use super::*;
type Block = RawBlock<u64>;
type Block = RawBlock<ExtrinsicWrapper<u64>>;
pub fn test_id(number: u64) -> ComplexBlockId<Block> {
ComplexBlockId::new(From::from(number), number)
@@ -834,7 +834,7 @@ pub mod tests {
// when trying to insert block @ finalized number
assert!(ListCache::new(DummyStorage::new(), 1024, test_id(100))
.on_block_insert(&mut DummyTransaction::new(), test_id(99), test_id(100), Some(100), false).unwrap().is_none());
// when trying to insert non-final block AND it appends to the best block of unfinalized fork
// AND new value is the same as in the fork' best block
let mut cache = ListCache::new(
+3 -3
View File
@@ -23,7 +23,7 @@ use kvdb::{KeyValueDB, DBTransaction};
use client::error::{Error as ClientError, ErrorKind as ClientErrorKind, Result as ClientResult};
use codec::{Encode, Decode};
use runtime_primitives::generic::BlockId;
use runtime_primitives::traits::{Block as BlockT, NumberFor};
use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor};
use utils::{self, db_err, meta_keys};
use cache::{CacheItemT, ComplexBlockId};
@@ -126,7 +126,8 @@ impl DbStorage {
impl<Block: BlockT, T: CacheItemT> Storage<Block, T> for DbStorage {
fn read_id(&self, at: NumberFor<Block>) -> ClientResult<Option<Block::Hash>> {
utils::read_id::<Block>(&*self.db, self.columns.hash_lookup, BlockId::Number(at))
utils::read_header::<Block>(&*self.db, self.columns.hash_lookup, self.columns.header, BlockId::Number(at))
.map(|maybe_header| maybe_header.map(|header| header.hash()))
}
fn read_header(&self, at: &Block::Hash) -> ClientResult<Option<Block::Header>> {
@@ -246,7 +247,6 @@ mod meta {
#[cfg(test)]
pub mod tests {
use std::collections::{HashMap, HashSet};
use runtime_primitives::traits::Header as HeaderT;
use super::*;
pub struct FaultyStorage;
+3 -2
View File
@@ -194,10 +194,11 @@ impl<Block: BlockT> BlockchainCache<Block> for DbCacheSync<Block> {
ComplexBlockId::new(hash, *header.number())
},
BlockId::Number(number) => {
let hash = utils::read_id::<Block>(
let hash = utils::read_header::<Block>(
&**db,
columns.hash_lookup,
BlockId::Number(number.clone())).ok()??;
columns.header,
BlockId::Number(number.clone())).ok()??.hash();
ComplexBlockId::new(hash, number)
},
};
+101 -42
View File
@@ -75,7 +75,7 @@ use runtime_primitives::BuildStorage;
use state_machine::backend::Backend as StateBackend;
use executor::RuntimeInfo;
use state_machine::{CodeExecutor, DBValue, ExecutionStrategy};
use utils::{Meta, db_err, meta_keys, open_database, read_db, read_id, read_meta};
use utils::{Meta, db_err, meta_keys, open_database, read_db, block_id_to_lookup_key, read_meta};
use client::LeafSet;
use state_db::StateDb;
pub use state_db::PruningMode;
@@ -118,6 +118,7 @@ mod columns {
pub const META: Option<u32> = ::utils::COLUMN_META;
pub const STATE: Option<u32> = Some(1);
pub const STATE_META: Option<u32> = Some(2);
/// maps hashes to lookup keys
pub const HASH_LOOKUP: Option<u32> = Some(3);
pub const HEADER: Option<u32> = Some(4);
pub const BODY: Option<u32> = Some(5);
@@ -219,15 +220,20 @@ impl<Block: BlockT> client::blockchain::HeaderBackend<Block> for BlockchainDb<Bl
}
}
fn number(&self, hash: Block::Hash) -> Result<Option<<Block::Header as HeaderT>::Number>, client::error::Error> {
self.header(BlockId::Hash(hash)).and_then(|key| match key {
Some(hdr) => Ok(Some(hdr.number().clone())),
None => Ok(None),
})
fn number(&self, hash: Block::Hash) -> Result<Option<NumberFor<Block>>, client::error::Error> {
if let Some(lookup_key) = block_id_to_lookup_key::<Block>(&*self.db, columns::HASH_LOOKUP, BlockId::Hash(hash))? {
let number = utils::lookup_key_to_number(&lookup_key)?;
Ok(Some(number))
} else {
Ok(None)
}
}
fn hash(&self, number: <Block::Header as HeaderT>::Number) -> Result<Option<Block::Hash>, client::error::Error> {
read_id::<Block>(&*self.db, columns::HASH_LOOKUP, BlockId::Number(number))
fn hash(&self, number: NumberFor<Block>) -> Result<Option<Block::Hash>, client::error::Error> {
self.header(BlockId::Number(number)).and_then(|maybe_header| match maybe_header {
Some(header) => Ok(Some(header.hash().clone())),
None => Ok(None),
})
}
}
@@ -495,12 +501,9 @@ impl<Block: BlockT> Backend<Block> {
let hash = if new_canonical == number_u64 {
hash
} else {
read_id::<Block>(
&*self.blockchain.db,
columns::HASH_LOOKUP,
BlockId::Number(As::sa(new_canonical))
)?.expect("existence of block with number `new_canonical` \
implies existence of blocks with all nubmers before it; qed")
::client::blockchain::HeaderBackend::hash(&self.blockchain, As::sa(new_canonical))?
.expect("existence of block with number `new_canonical` \
implies existence of blocks with all numbers before it; qed")
};
trace!(target: "db", "Canonicalize block #{} ({:?})", new_canonical, hash);
@@ -588,14 +591,6 @@ impl<Block> client::backend::Backend<Block, Blake2Hasher> for Backend<Block> whe
let parent_hash = *pending_block.header.parent_hash();
let number = pending_block.header.number().clone();
transaction.put(columns::HEADER, hash.as_ref(), &pending_block.header.encode());
if let Some(body) = pending_block.body {
transaction.put(columns::BODY, hash.as_ref(), &body.encode());
}
if let Some(justification) = pending_block.justification {
transaction.put(columns::JUSTIFICATION, hash.as_ref(), &justification.encode());
}
if pending_block.leaf_state.is_best() {
let meta = self.blockchain.meta.read();
@@ -607,7 +602,7 @@ impl<Block> client::backend::Backend<Block, Blake2Hasher> for Backend<Block> whe
BlockId::Hash(parent_hash),
)?;
// update block number to hash lookup entries.
// uncanonicalize
for retracted in tree_route.retracted() {
if retracted.hash == meta.finalized_hash {
warn!("Potential safety failure: reverting finalized block {:?}",
@@ -616,30 +611,94 @@ impl<Block> client::backend::Backend<Block, Blake2Hasher> for Backend<Block> whe
return Err(::client::error::ErrorKind::NotInFinalizedChain.into());
}
transaction.delete(
columns::HASH_LOOKUP,
&::utils::number_to_lookup_key(retracted.number)
);
let prev_lookup_key = ::utils::number_to_lookup_key(retracted.number);
let new_lookup_key = ::utils::number_and_hash_to_lookup_key(retracted.number, retracted.hash);
// change mapping from `number -> header`
// to `number + hash -> header`
let retracted_header = if let Some(header) = ::client::blockchain::HeaderBackend::<Block>::header(&self.blockchain, BlockId::Number(retracted.number))? {
header
} else {
return Err(client::error::ErrorKind::UnknownBlock(format!("retracted {:?}", retracted)).into());
};
transaction.delete(columns::HEADER, &prev_lookup_key);
transaction.put(columns::HEADER, &new_lookup_key, &retracted_header.encode());
// if body is stored
// change mapping from `number -> body`
// to `number + hash -> body`
if let Some(retracted_body) = ::client::blockchain::Backend::<Block>::body(&self.blockchain, BlockId::Number(retracted.number))? {
transaction.delete(columns::BODY, &prev_lookup_key);
transaction.put(columns::BODY, &new_lookup_key, &retracted_body.encode());
}
// if justification is stored
// change mapping from `number -> justification`
// to `number + hash -> justification`
if let Some(retracted_justification) = ::client::blockchain::Backend::<Block>::justification(&self.blockchain, BlockId::Number(retracted.number))? {
transaction.delete(columns::JUSTIFICATION, &prev_lookup_key);
transaction.put(columns::JUSTIFICATION, &new_lookup_key, &retracted_justification.encode());
}
transaction.put(columns::HASH_LOOKUP, retracted.hash.as_ref(), &new_lookup_key);
}
// canonicalize
for enacted in tree_route.enacted() {
let hash: &Block::Hash = &enacted.hash;
transaction.put(
columns::HASH_LOOKUP,
&::utils::number_to_lookup_key(enacted.number),
hash.as_ref(),
)
let prev_lookup_key = ::utils::number_and_hash_to_lookup_key(enacted.number, enacted.hash);
let new_lookup_key = ::utils::number_to_lookup_key(enacted.number);
// change mapping from `number + hash -> header`
// to `number -> header`
let enacted_header = if let Some(header) = ::client::blockchain::HeaderBackend::<Block>::header(&self.blockchain, BlockId::Number(enacted.number))? {
header
} else {
return Err(client::error::ErrorKind::UnknownBlock(format!("enacted {:?}", enacted)).into());
};
transaction.delete(columns::HEADER, &prev_lookup_key);
transaction.put(columns::HEADER, &new_lookup_key, &enacted_header.encode());
// if body is stored
// change mapping from `number + hash -> body`
// to `number -> body`
if let Some(enacted_body) = ::client::blockchain::Backend::<Block>::body(&self.blockchain, BlockId::Number(enacted.number))? {
transaction.delete(columns::BODY, &prev_lookup_key);
transaction.put(columns::BODY, &new_lookup_key, &enacted_body.encode());
}
// if justification is stored
// change mapping from `number -> justification`
// to `number + hash -> justification`
if let Some(enacted_justification) = ::client::blockchain::Backend::<Block>::justification(&self.blockchain, BlockId::Number(enacted.number))? {
transaction.delete(columns::JUSTIFICATION, &prev_lookup_key);
transaction.put(columns::JUSTIFICATION, &new_lookup_key, &enacted_justification.encode());
}
transaction.put(columns::HASH_LOOKUP, enacted.hash.as_ref(), &new_lookup_key);
}
}
transaction.put(
columns::HASH_LOOKUP,
&::utils::number_to_lookup_key(number),
hash.as_ref()
);
transaction.put(columns::META, meta_keys::BEST_BLOCK, hash.as_ref());
}
// blocks in longest chain are keyed by number
let lookup_key = if pending_block.leaf_state.is_best() {
::utils::number_to_lookup_key(number).to_vec()
} else {
// other blocks are keyed by number + hash
::utils::number_and_hash_to_lookup_key(number, hash)
};
transaction.put(columns::HEADER, &lookup_key, &pending_block.header.encode());
if let Some(body) = pending_block.body {
transaction.put(columns::BODY, &lookup_key, &body.encode());
}
if let Some(justification) = pending_block.justification {
transaction.put(columns::JUSTIFICATION, &lookup_key, &justification.encode());
}
transaction.put(columns::HASH_LOOKUP, hash.as_ref(), &lookup_key);
if number == Zero::zero() {
transaction.put(columns::META, meta_keys::FINALIZED_BLOCK, hash.as_ref());
transaction.put(columns::META, meta_keys::GENESIS_HASH, hash.as_ref());
@@ -732,14 +791,14 @@ impl<Block> client::backend::Backend<Block, Blake2Hasher> for Backend<Block> whe
match self.storage.state_db.revert_one() {
Some(commit) => {
apply_state_commit(&mut transaction, commit);
let removed = best.clone();
let _removed = best.clone();
best -= As::sa(1);
let header = self.blockchain.header(BlockId::Number(best))?.ok_or_else(
|| client::error::ErrorKind::UnknownBlock(
format!("Error reverting to {}. Block header not found.", best)))?;
transaction.put(columns::META, meta_keys::BEST_BLOCK, header.hash().as_ref());
transaction.delete(columns::HASH_LOOKUP, &::utils::number_to_lookup_key(removed));
transaction.delete(columns::HASH_LOOKUP, header.hash().as_ref());
self.storage.db.write(transaction).map_err(db_err)?;
self.blockchain.update_meta(header.hash().clone(), best.clone(), true, false);
self.blockchain.leaves.write().revert(header.hash().clone(), header.number().clone(), header.parent_hash().clone());
@@ -806,11 +865,11 @@ mod tests {
use client::backend::Backend as BTrait;
use client::backend::BlockImportOperation as Op;
use client::blockchain::HeaderBackend as BlockchainHeaderBackend;
use runtime_primitives::testing::{Header, Block as RawBlock};
use runtime_primitives::testing::{Header, Block as RawBlock, ExtrinsicWrapper};
use state_machine::{TrieMut, TrieDBMut, ChangesTrieRootsStorage, ChangesTrieStorage};
use test_client;
type Block = RawBlock<u64>;
type Block = RawBlock<ExtrinsicWrapper<u64>>;
fn prepare_changes(changes: Vec<(Vec<u8>, Vec<u8>)>) -> (H256, MemoryDB<Blake2Hasher>) {
let mut changes_root = H256::default();
+59 -27
View File
@@ -34,7 +34,7 @@ use runtime_primitives::traits::{Block as BlockT, Header as HeaderT,
Zero, One, As, NumberFor};
use cache::{DbCacheSync, DbCache, ComplexBlockId};
use utils::{meta_keys, Meta, db_err, number_to_lookup_key, open_database,
read_db, read_id, read_meta};
read_db, block_id_to_lookup_key, read_meta};
use DatabaseSettings;
pub(crate) mod columns {
@@ -168,14 +168,16 @@ impl<Block> BlockchainHeaderBackend<Block> for LightStorage<Block>
}
fn number(&self, hash: Block::Hash) -> ClientResult<Option<<<Block as BlockT>::Header as HeaderT>::Number>> {
self.header(BlockId::Hash(hash)).and_then(|key| match key {
Some(hdr) => Ok(Some(hdr.number().clone())),
None => Ok(None),
})
if let Some(lookup_key) = block_id_to_lookup_key::<Block>(&*self.db, columns::HASH_LOOKUP, BlockId::Hash(hash))? {
let number = ::utils::lookup_key_to_number(&lookup_key)?;
Ok(Some(number))
} else {
Ok(None)
}
}
fn hash(&self, number: <<Block as BlockT>::Header as HeaderT>::Number) -> ClientResult<Option<Block::Hash>> {
read_id::<Block>(&*self.db, columns::HASH_LOOKUP, BlockId::Number(number))
Ok(self.header(BlockId::Number(number))?.map(|header| header.hash().clone()))
}
}
@@ -212,13 +214,13 @@ impl<Block: BlockT> LightStorage<Block> {
trace!(target: "db", "Replacing blocks [{}..{}] with CHT#{}", new_cht_start, new_cht_end, new_cht_number);
while prune_block <= new_cht_end {
let id = read_id::<Block>(&*self.db, columns::HASH_LOOKUP, BlockId::Number(prune_block))?;
if let Some(hash) = id {
let lookup_key = number_to_lookup_key(prune_block);
transaction.delete(columns::HASH_LOOKUP, &lookup_key);
transaction.delete(columns::HEADER, hash.as_ref());
if let Some(hash) = self.hash(prune_block)? {
let lookup_key = block_id_to_lookup_key::<Block>(&*self.db, columns::HASH_LOOKUP, BlockId::Number(prune_block))?
.expect("retrieved hash for `prune_block` right above. therefore retrieving lookup key must succeed. q.e.d.");
transaction.delete(columns::HASH_LOOKUP, hash.as_ref());
transaction.delete(columns::HEADER, &lookup_key);
}
prune_block += <<Block as BlockT>::Header as HeaderT>::Number::one();
prune_block += NumberFor::<Block>::one();
}
}
}
@@ -242,8 +244,6 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
let number = *header.number();
let parent_hash = *header.parent_hash();
transaction.put(columns::HEADER, hash.as_ref(), &header.encode());
if leaf_state.is_best() {
// handle reorg.
{
@@ -263,27 +263,55 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
(&retracted.number, &retracted.hash));
}
transaction.delete(
columns::HASH_LOOKUP,
&::utils::number_to_lookup_key(retracted.number)
);
let prev_lookup_key = ::utils::number_to_lookup_key(retracted.number);
let new_lookup_key = ::utils::number_and_hash_to_lookup_key(retracted.number, retracted.hash);
// change mapping from `number -> header`
// to `number + hash -> header`
let retracted_header = if let Some(header) = self.header(BlockId::Number(retracted.number))? {
header
} else {
return Err(::client::error::ErrorKind::UnknownBlock(format!("retracted {:?}", retracted)).into());
};
transaction.delete(columns::HEADER, &prev_lookup_key);
transaction.put(columns::HEADER, &new_lookup_key, &retracted_header.encode());
transaction.put(columns::HASH_LOOKUP, retracted.hash.as_ref(), &new_lookup_key);
}
for enacted in tree_route.enacted() {
let hash: &Block::Hash = &enacted.hash;
transaction.put(
columns::HASH_LOOKUP,
&::utils::number_to_lookup_key(enacted.number),
hash.as_ref(),
)
let prev_lookup_key = ::utils::number_and_hash_to_lookup_key(enacted.number, enacted.hash);
let new_lookup_key = ::utils::number_to_lookup_key(enacted.number);
// change mapping from `number + hash -> header`
// to `number -> header`
let enacted_header = if let Some(header) = self.header(BlockId::Number(enacted.number))? {
header
} else {
return Err(::client::error::ErrorKind::UnknownBlock(format!("enacted {:?}", enacted)).into());
};
transaction.delete(columns::HEADER, &prev_lookup_key);
transaction.put(columns::HEADER, &new_lookup_key, &enacted_header.encode());
transaction.put(columns::HASH_LOOKUP, enacted.hash.as_ref(), &new_lookup_key);
}
}
}
transaction.put(columns::META, meta_keys::BEST_BLOCK, hash.as_ref());
transaction.put(columns::HASH_LOOKUP, &number_to_lookup_key(number), hash.as_ref());
}
// blocks in longest chain are keyed by number
let lookup_key = if leaf_state.is_best() {
::utils::number_to_lookup_key(number).to_vec()
} else {
// other blocks are keyed by number + hash
::utils::number_and_hash_to_lookup_key(number, hash)
};
transaction.put(columns::HEADER, &lookup_key, &header.encode());
transaction.put(columns::HASH_LOOKUP, hash.as_ref(), &lookup_key);
let finalized = match leaf_state {
NewBlockState::Final => true,
_ => false,
@@ -374,10 +402,10 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
#[cfg(test)]
pub(crate) mod tests {
use client::cht;
use runtime_primitives::testing::{H256 as Hash, Header, Block as RawBlock};
use runtime_primitives::testing::{H256 as Hash, Header, Block as RawBlock, ExtrinsicWrapper};
use super::*;
type Block = RawBlock<u32>;
type Block = RawBlock<ExtrinsicWrapper<u32>>;
fn prepare_header(parent: &Hash, number: u64, extrinsics_root: Hash) -> Header {
Header {
@@ -512,6 +540,7 @@ pub(crate) mod tests {
prev_hash = insert_block(&db, &prev_hash, 1 + number, None);
}
assert_eq!(db.db.iter(columns::HEADER).count(), (1 + cht::SIZE) as usize);
assert_eq!(db.db.iter(columns::HASH_LOOKUP).count(), (1 + cht::SIZE) as usize);
assert_eq!(db.db.iter(columns::CHT).count(), 0);
// insert next SIZE blocks && ensure that nothing is pruned
@@ -519,12 +548,14 @@ pub(crate) mod tests {
prev_hash = insert_block(&db, &prev_hash, 1 + cht::SIZE + number, None);
}
assert_eq!(db.db.iter(columns::HEADER).count(), (1 + cht::SIZE + cht::SIZE) as usize);
assert_eq!(db.db.iter(columns::HASH_LOOKUP).count(), (1 + cht::SIZE + cht::SIZE) as usize);
assert_eq!(db.db.iter(columns::CHT).count(), 0);
// insert block #{2 * cht::SIZE + 1} && check that new CHT is created + headers of this CHT are pruned
// nothing is yet finalized, so nothing is pruned.
prev_hash = insert_block(&db, &prev_hash, 1 + cht::SIZE + cht::SIZE, None);
assert_eq!(db.db.iter(columns::HEADER).count(), (2 + cht::SIZE + cht::SIZE) as usize);
assert_eq!(db.db.iter(columns::HASH_LOOKUP).count(), (2 + cht::SIZE + cht::SIZE) as usize);
assert_eq!(db.db.iter(columns::CHT).count(), 0);
// now finalize the block.
@@ -533,6 +564,7 @@ pub(crate) mod tests {
}
db.finalize_header(BlockId::Hash(prev_hash)).unwrap();
assert_eq!(db.db.iter(columns::HEADER).count(), (1 + cht::SIZE + 1) as usize);
assert_eq!(db.db.iter(columns::HASH_LOOKUP).count(), (1 + cht::SIZE + 1) as usize);
assert_eq!(db.db.iter(columns::CHT).count(), 1);
assert!((0..cht::SIZE).all(|i| db.db.get(columns::HEADER, &number_to_lookup_key(1 + i)).unwrap().is_none()));
}
+48 -25
View File
@@ -67,10 +67,11 @@ pub struct Meta<N, H> {
}
/// A block lookup key: used for canonical lookup from block number to hash
pub type BlockLookupKey = [u8; 4];
pub type ShortBlockLookupKey = [u8; 4];
/// Convert block number into lookup key (LE representation).
pub fn number_to_lookup_key<N>(n: N) -> BlockLookupKey where N: As<u64> {
/// Convert block number into short lookup key (LE representation) for
/// blocks that are in the canonical chain.
pub fn number_to_lookup_key<N>(n: N) -> ShortBlockLookupKey where N: As<u64> {
let n: u64 = n.as_();
assert!(n & 0xffffffff00000000 == 0);
@@ -82,6 +83,49 @@ pub fn number_to_lookup_key<N>(n: N) -> BlockLookupKey where N: As<u64> {
]
}
/// Convert number and hash into long lookup key for blocks that are
/// not in the canonical chain.
pub fn number_and_hash_to_lookup_key<N, H>(number: N, hash: H) -> Vec<u8> where
N: As<u64>,
H: AsRef<[u8]>
{
let mut lookup_key = number_to_lookup_key(number).to_vec();
lookup_key.extend_from_slice(hash.as_ref());
lookup_key
}
/// Convert block lookup key into block number.
/// all block lookup keys start with the block number.
pub fn lookup_key_to_number<N>(key: &[u8]) -> client::error::Result<N> where N: As<u64> {
if key.len() < 4 {
return Err(client::error::ErrorKind::Backend("Invalid block key".into()).into());
}
Ok((key[0] as u64) << 24
| (key[1] as u64) << 16
| (key[2] as u64) << 8
| (key[3] as u64)).map(As::sa)
}
/// Convert block id to block lookup key.
/// block lookup key is the DB-key header, block and justification are stored under.
/// looks up lookup key by hash from DB as necessary.
pub fn block_id_to_lookup_key<Block>(
db: &KeyValueDB,
hash_lookup_col: Option<u32>,
id: BlockId<Block>
) -> Result<Option<Vec<u8>>, client::error::Error> where
Block: BlockT,
{
match id {
// numbers are solely looked up in canonical chain
BlockId::Number(n) => Ok(Some(number_to_lookup_key(n).to_vec())),
BlockId::Hash(h) => db.get(hash_lookup_col, h.as_ref()).map(|v|
v.map(|v| { v.into_vec() })
).map_err(db_err),
}
}
/// Maps database error to client error
pub fn db_err(err: io::Error) -> client::error::Error {
use std::error::Error;
@@ -113,33 +157,12 @@ pub fn open_database(config: &DatabaseSettings, col_meta: Option<u32>, db_type:
Ok(Arc::new(db))
}
/// Convert block id to block key, looking up canonical hash by number from DB as necessary.
pub fn read_id<Block>(db: &KeyValueDB, col_index: Option<u32>, id: BlockId<Block>) -> Result<Option<Block::Hash>, client::error::Error>
where
Block: BlockT,
{
match id {
BlockId::Hash(h) => Ok(Some(h)),
BlockId::Number(n) => db.get(col_index, &number_to_lookup_key(n)).map(|v|
v.map(|v| {
let mut h = <Block::Hash>::default();
{
let h = h.as_mut();
let len = ::std::cmp::min(v.len(), h.len());
h.as_mut().copy_from_slice(&v[..len]);
}
h
})
).map_err(db_err),
}
}
/// Read database column entry for the given block.
pub fn read_db<Block>(db: &KeyValueDB, col_index: Option<u32>, col: Option<u32>, id: BlockId<Block>) -> client::error::Result<Option<DBValue>>
where
Block: BlockT,
{
read_id(db, col_index, id).and_then(|key| match key {
block_id_to_lookup_key(db, col_index, id).and_then(|key| match key {
Some(key) => db.get(col, key.as_ref()).map_err(db_err),
None => Ok(None),
})
+2 -2
View File
@@ -89,7 +89,7 @@ pub fn build_proof<Header, Hasher, I>(
{
let transaction = build_pairs::<Header, I>(cht_size, cht_num, hashes)?
.into_iter()
.map(|(k, v)| (k, Some(v)))
.map(|(k, v)| (None, k, Some(v)))
.collect::<Vec<_>>();
let storage = InMemoryState::<Hasher>::default().update(transaction);
let (value, proof) = prove_read(storage, &encode_cht_key(block_num)).ok()?;
@@ -205,7 +205,7 @@ pub fn decode_cht_value(value: &[u8]) -> Option<H256> {
32 => Some(H256::from_slice(&value[0..32])),
_ => None,
}
}
#[cfg(test)]
+3 -3
View File
@@ -574,7 +574,7 @@ impl<B, E, Block> Client<B, E, Block> where
for tx in extrinsics {
let tx = api::TaggedTransactionQueue::validate_transaction(self, &id, &tx)?;
match tx {
TransactionValidity::Valid(_, _, mut provides, ..) => {
TransactionValidity::Valid { mut provides, .. } => {
tags.append(&mut provides);
},
// silently ignore invalid extrinsics,
@@ -1128,12 +1128,12 @@ impl<B, E, Block> api::BlockBuilder<Block> for Client<B, E, Block> where
self.call_api_at(at, "inherent_extrinsics", &(inherent))
}
fn check_inherents<InherentData: Encode + Decode>(
fn check_inherents<InherentData: Encode + Decode, InherentError: Encode + Decode>(
&self,
at: &BlockId<Block>,
block: &Block,
data: &InherentData
) -> Result<Result<(), api::BlockBuilderError>, Self::Error> {
) -> Result<Result<(), InherentError>, Self::Error> {
self.call_api_at(at, "check_inherents", &(block, data))
}
+19 -2
View File
@@ -201,7 +201,7 @@ impl<Block, S, F, H> StateBackend<H> for OnDemandState<Block, S, F>
S: BlockchainStorage<Block>,
F: Fetcher<Block>,
H: Hasher,
{
type Error = ClientError;
type Transaction = ();
@@ -227,15 +227,32 @@ impl<Block, S, F, H> StateBackend<H> for OnDemandState<Block, S, F>
.into_future().wait()
}
fn child_storage(&self, _storage_key: &[u8], _key: &[u8]) -> ClientResult<Option<Vec<u8>>> {
Err(ClientErrorKind::NotAvailableOnLightClient.into())
}
fn for_keys_with_prefix<A: FnMut(&[u8])>(&self, _prefix: &[u8], _action: A) {
// whole state is not available on light node
}
fn for_keys_in_child_storage<A: FnMut(&[u8])>(&self, _storage_key: &[u8], _action: A) {
// whole state is not available on light node
}
fn storage_root<I>(&self, _delta: I) -> (H::Out, Self::Transaction)
where I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)> {
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>
{
(H::Out::default(), ())
}
fn child_storage_root<I>(&self, _key: &[u8], _delta: I) -> (Vec<u8>, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>
{
(H::Out::default().as_ref().to_vec(), ())
}
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
// whole state is not available on light node
Vec::new()
+2 -3
View File
@@ -179,11 +179,10 @@ impl<Block: BlockT> StorageNotifications<Block> {
#[cfg(test)]
mod tests {
use runtime_primitives::testing::{H256 as Hash, Block as RawBlock};
use runtime_primitives::testing::{H256 as Hash, Block as RawBlock, ExtrinsicWrapper};
use super::*;
use futures::Stream;
#[cfg(test)]
impl From<Vec<(StorageKey, Option<StorageData>)>> for StorageChangeSet {
fn from(changes: Vec<(StorageKey, Option<StorageData>)>) -> Self {
@@ -201,7 +200,7 @@ mod tests {
}
}
type Block = RawBlock<Hash>;
type Block = RawBlock<ExtrinsicWrapper<Hash>>;
#[test]
fn triggering_change_should_notify_wildcard_listeners() {
+155 -7
View File
@@ -181,6 +181,46 @@ impl_function_executor!(this: FunctionExecutor<'e, E>,
this.ext.set_storage(key, value);
Ok(())
},
ext_set_child_storage(storage_key_data: *const u8, storage_key_len: u32, key_data: *const u8, key_len: u32, value_data: *const u8, value_len: u32) => {
let storage_key = this.memory.get(storage_key_data, storage_key_len as usize).map_err(|_| UserError("Invalid attempt to determine storage_key in ext_set_child_storage"))?;
let key = this.memory.get(key_data, key_len as usize).map_err(|_| UserError("Invalid attempt to determine key in ext_set_child_storage"))?;
let value = this.memory.get(value_data, value_len as usize).map_err(|_| UserError("Invalid attempt to determine value in ext_set_child_storage"))?;
if let Some(_preimage) = this.hash_lookup.get(&key) {
debug_trace!(
target: "wasm-trace", "*** Setting child storage: {} -> %{} -> {} [k={}]",
::primitives::hexdisplay::ascii_format(&storage_key),
::primitives::hexdisplay::ascii_format(&_preimage),
HexDisplay::from(&value),
HexDisplay::from(&key)
);
} else {
debug_trace!(
target: "wasm-trace", "*** Setting child storage: {} -> {} -> {} [k={}]",
::primitives::hexdisplay::ascii_format(&storage_key),
::primitives::hexdisplay::ascii_format(&key),
HexDisplay::from(&value),
HexDisplay::from(&key)
);
}
this.ext.set_child_storage(storage_key, key, value);
Ok(())
},
ext_clear_child_storage(storage_key_data: *const u8, storage_key_len: u32, key_data: *const u8, key_len: u32) => {
let storage_key = this.memory.get(
storage_key_data,
storage_key_len as usize
).map_err(|_| UserError("Invalid attempt to determine storage_key in ext_clear_child_storage"))?;
let key = this.memory.get(key_data, key_len as usize).map_err(|_| UserError("Invalid attempt to determine key in ext_clear_child_storage"))?;
debug_trace!(target: "wasm-trace", "*** Clearing child storage: {} -> {} [k={}]",
::primitives::hexdisplay::ascii_format(&storage_key),
if let Some(_preimage) = this.hash_lookup.get(&key) {
format!("%{}", ::primitives::hexdisplay::ascii_format(&_preimage))
} else {
format!(" {}", ::primitives::hexdisplay::ascii_format(&key))
}, HexDisplay::from(&key));
this.ext.clear_child_storage(&storage_key, &key);
Ok(())
},
ext_clear_storage(key_data: *const u8, key_len: u32) => {
let key = this.memory.get(key_data, key_len as usize).map_err(|_| UserError("Invalid attempt to determine key in ext_clear_storage"))?;
debug_trace!(target: "wasm-trace", "*** Clearing storage: {} [k={}]",
@@ -196,14 +236,33 @@ impl_function_executor!(this: FunctionExecutor<'e, E>,
let key = this.memory.get(key_data, key_len as usize).map_err(|_| UserError("Invalid attempt to determine key in ext_exists_storage"))?;
Ok(if this.ext.exists_storage(&key) { 1 } else { 0 })
},
ext_exists_child_storage(storage_key_data: *const u8, storage_key_len: u32, key_data: *const u8, key_len: u32) -> u32 => {
let storage_key = this.memory.get(
storage_key_data,
storage_key_len as usize
).map_err(|_| UserError("Invalid attempt to determine storage_key in ext_exists_child_storage"))?;
let key = this.memory.get(key_data, key_len as usize).map_err(|_| UserError("Invalid attempt to determine key in ext_exists_child_storage"))?;
Ok(if this.ext.exists_child_storage(&storage_key, &key) { 1 } else { 0 })
},
ext_clear_prefix(prefix_data: *const u8, prefix_len: u32) => {
let prefix = this.memory.get(prefix_data, prefix_len as usize).map_err(|_| UserError("Invalid attempt to determine prefix in ext_clear_prefix"))?;
this.ext.clear_prefix(&prefix);
Ok(())
},
ext_kill_child_storage(storage_key_data: *const u8, storage_key_len: u32) => {
let storage_key = this.memory.get(
storage_key_data,
storage_key_len as usize
).map_err(|_| UserError("Invalid attempt to determine storage_key in ext_kill_child_storage"))?;
this.ext.kill_child_storage(&storage_key);
Ok(())
},
// return 0 and place u32::max_value() into written_out if no value exists for the key.
ext_get_allocated_storage(key_data: *const u8, key_len: u32, written_out: *mut u32) -> *mut u8 => {
let key = this.memory.get(key_data, key_len as usize).map_err(|_| UserError("Invalid attempt to determine key in ext_get_allocated_storage"))?;
let key = this.memory.get(
key_data,
key_len as usize
).map_err(|_| UserError("Invalid attempt to determine key in ext_get_allocated_storage"))?;
let maybe_value = this.ext.storage(&key);
debug_trace!(target: "wasm-trace", "*** Getting storage: {} == {} [k={}]",
@@ -213,9 +272,9 @@ impl_function_executor!(this: FunctionExecutor<'e, E>,
format!(" {}", ::primitives::hexdisplay::ascii_format(&key))
},
if let Some(ref b) = maybe_value {
format!("{}", HexDisplay::from(b))
&format!("{}", HexDisplay::from(b))
} else {
"<empty>".to_owned()
"<empty>"
},
HexDisplay::from(&key)
);
@@ -232,6 +291,45 @@ impl_function_executor!(this: FunctionExecutor<'e, E>,
Ok(0)
}
},
// return 0 and place u32::max_value() into written_out if no value exists for the key.
ext_get_allocated_child_storage(storage_key_data: *const u8, storage_key_len: u32, key_data: *const u8, key_len: u32, written_out: *mut u32) -> *mut u8 => {
let storage_key = this.memory.get(
storage_key_data,
storage_key_len as usize
).map_err(|_| UserError("Invalid attempt to determine storage_key in ext_get_allocated_child_storage"))?;
let key = this.memory.get(
key_data,
key_len as usize
).map_err(|_| UserError("Invalid attempt to determine key in ext_get_allocated_child_storage"))?;
let maybe_value = this.ext.child_storage(&storage_key, &key);
debug_trace!(target: "wasm-trace", "*** Getting child storage: {} -> {} == {} [k={}]",
::primitives::hexdisplay::ascii_format(&storage_key),
if let Some(_preimage) = this.hash_lookup.get(&key) {
format!("%{}", ::primitives::hexdisplay::ascii_format(&_preimage))
} else {
format!(" {}", ::primitives::hexdisplay::ascii_format(&key))
},
if let Some(ref b) = maybe_value {
&format!("{}", HexDisplay::from(b))
} else {
"<empty>"
},
HexDisplay::from(&key)
);
if let Some(value) = maybe_value {
let offset = this.heap.allocate(value.len() as u32) as u32;
this.memory.set(offset, &value).map_err(|_| UserError("Invalid attempt to set memory in ext_get_allocated_child_storage"))?;
this.memory.write_primitive(written_out, value.len() as u32)
.map_err(|_| UserError("Invalid attempt to write written_out in ext_get_allocated_child_storage"))?;
Ok(offset)
} else {
this.memory.write_primitive(written_out, u32::max_value())
.map_err(|_| UserError("Invalid attempt to write failed written_out in ext_get_allocated_child_storage"))?;
Ok(0)
}
},
// return u32::max_value() if no value exists for the key.
ext_get_storage_into(key_data: *const u8, key_len: u32, value_data: *mut u8, value_len: u32, value_offset: u32) -> u32 => {
let key = this.memory.get(key_data, key_len as usize).map_err(|_| UserError("Invalid attempt to get key in ext_get_storage_into"))?;
@@ -243,9 +341,9 @@ impl_function_executor!(this: FunctionExecutor<'e, E>,
format!(" {}", ::primitives::hexdisplay::ascii_format(&key))
},
if let Some(ref b) = maybe_value {
format!("{}", HexDisplay::from(b))
&format!("{}", HexDisplay::from(b))
} else {
"<empty>".to_owned()
"<empty>"
},
HexDisplay::from(&key)
);
@@ -259,11 +357,61 @@ impl_function_executor!(this: FunctionExecutor<'e, E>,
Ok(u32::max_value())
}
},
// return u32::max_value() if no value exists for the key.
ext_get_child_storage_into(storage_key_data: *const u8, storage_key_len: u32, key_data: *const u8, key_len: u32, value_data: *mut u8, value_len: u32, value_offset: u32) -> u32 => {
let storage_key = this.memory.get(
storage_key_data,
storage_key_len as usize
).map_err(|_| UserError("Invalid attempt to determine storage_key in ext_get_child_storage_into"))?;
let key = this.memory.get(
key_data,
key_len as usize
).map_err(|_| UserError("Invalid attempt to get key in ext_get_child_storage_into"))?;
let maybe_value = this.ext.child_storage(&storage_key, &key);
debug_trace!(target: "wasm-trace", "*** Getting storage: {} -> {} == {} [k={}]",
::primitives::hexdisplay::ascii_format(&storage_key),
if let Some(_preimage) = this.hash_lookup.get(&key) {
format!("%{}", ::primitives::hexdisplay::ascii_format(&_preimage))
} else {
format!(" {}", ::primitives::hexdisplay::ascii_format(&key))
},
if let Some(ref b) = maybe_value {
&format!("{}", HexDisplay::from(b))
} else {
"<empty>"
},
HexDisplay::from(&key)
);
if let Some(value) = maybe_value {
let value = &value[value_offset as usize..];
let written = ::std::cmp::min(value_len as usize, value.len());
this.memory.set(value_data, &value[..written]).map_err(|_| UserError("Invalid attempt to set value in ext_get_child_storage_into"))?;
Ok(written as u32)
} else {
Ok(u32::max_value())
}
},
ext_storage_root(result: *mut u8) => {
let r = this.ext.storage_root();
this.memory.set(result, r.as_ref()).map_err(|_| UserError("Invalid attempt to set memory in ext_storage_root"))?;
Ok(())
},
ext_child_storage_root(storage_key_data: *const u8, storage_key_len: u32, written_out: *mut u32) -> *mut u8 => {
let storage_key = this.memory.get(storage_key_data, storage_key_len as usize).map_err(|_| UserError("Invalid attempt to determine storage_key in ext_child_storage_root"))?;
let r = this.ext.child_storage_root(&storage_key);
if let Some(value) = r {
let offset = this.heap.allocate(value.len() as u32) as u32;
this.memory.set(offset, &value).map_err(|_| UserError("Invalid attempt to set memory in ext_child_storage_root"))?;
this.memory.write_primitive(written_out, value.len() as u32)
.map_err(|_| UserError("Invalid attempt to write written_out in ext_child_storage_root"))?;
Ok(offset)
} else {
this.memory.write_primitive(written_out, u32::max_value())
.map_err(|_| UserError("Invalid attempt to write failed written_out in ext_child_storage_root"))?;
Ok(0)
}
},
ext_storage_changes_root(block: u64, result: *mut u8) -> u32 => {
let r = this.ext.storage_changes_root(block);
if let Some(ref r) = r {
@@ -300,9 +448,9 @@ impl_function_executor!(this: FunctionExecutor<'e, E>,
let hashed_key = twox_128(&key);
debug_trace!(target: "xxhash", "XXhash: {} -> {}",
if let Ok(_skey) = ::std::str::from_utf8(&key) {
_skey.to_owned()
_skey
} else {
format!("{}", HexDisplay::from(&key))
&format!("{}", HexDisplay::from(&key))
},
HexDisplay::from(&hashed_key)
);
@@ -161,7 +161,7 @@ where TSubstream: AsyncRead + AsyncWrite,
// Note that `inner` is wrapped in a `Fuse`, therefore we can poll it forever.
loop {
match self.inner.poll()? {
Async::Ready(Some(mut data)) =>
Async::Ready(Some(data)) =>
return Ok(Async::Ready(Some(data.freeze()))),
Async::Ready(None) =>
if !self.requires_poll_complete && self.send_queue.is_empty() {
+2 -2
View File
@@ -195,10 +195,10 @@ impl<B: BlockT> BlockCollection<B> {
mod test {
use super::{BlockCollection, BlockData, BlockRangeState};
use message;
use runtime_primitives::testing::Block as RawBlock;
use runtime_primitives::testing::{Block as RawBlock, ExtrinsicWrapper};
use primitives::H256;
type Block = RawBlock<u64>;
type Block = RawBlock<ExtrinsicWrapper<u64>>;
fn is_empty(bc: &BlockCollection<Block>) -> bool {
bc.blocks.is_empty() &&
@@ -310,11 +310,11 @@ impl<Block: BlockT> Specialization<Block> for ConsensusGossip<Block> where
#[cfg(test)]
mod tests {
use runtime_primitives::testing::{H256, Block as RawBlock};
use runtime_primitives::testing::{H256, Block as RawBlock, ExtrinsicWrapper};
use std::time::Instant;
use super::*;
type Block = RawBlock<u64>;
type Block = RawBlock<ExtrinsicWrapper<u64>>;
#[test]
fn collects_garbage() {
+1 -1
View File
@@ -516,8 +516,8 @@ impl<B: BlockT, S: Specialization<B>, H: ExHashT> Protocol<B, S, H> {
for (who, ref mut peer) in peers.iter_mut() {
let (hashes, to_send): (Vec<_>, Vec<_>) = extrinsics
.iter()
.cloned()
.filter(|&(ref hash, _)| peer.known_extrinsics.insert(hash.clone()))
.cloned()
.unzip();
if !to_send.is_empty() {
+9
View File
@@ -72,4 +72,13 @@ pub mod well_known_keys {
/// Changes trie configuration is stored under this key.
pub const CHANGES_TRIE_CONFIG: &'static [u8] = b":changes_trie";
/// Prefix reserved for child storage keys.
pub const CHILD_STORAGE_KEY_PREFIX: &'static [u8] = b":child_storage:";

/// Returns `true` when `key` lives in the reserved child-storage namespace,
/// i.e. it begins with [`CHILD_STORAGE_KEY_PREFIX`].
pub fn is_child_storage_key(key: &[u8]) -> bool {
	let prefix = CHILD_STORAGE_KEY_PREFIX;
	// Equivalent to `key.starts_with(prefix)`: long enough, and the leading
	// bytes match the reserved prefix exactly.
	key.len() >= prefix.len() && &key[..prefix.len()] == prefix
}
}
+1 -1
View File
@@ -127,7 +127,7 @@ impl<B, E, P> AuthorApi<ExHash<P>, BlockHash<P>, ExtrinsicFor<P>, Vec<ExtrinsicF
}
fn pending_extrinsics(&self) -> Result<Vec<ExtrinsicFor<P>>> {
Ok(self.pool.all(usize::max_value()))
Ok(self.pool.ready().map(|tx| tx.data.clone()).collect())
}
fn watch_extrinsic(&self, _metadata: Self::Metadata, subscriber: pubsub::Subscriber<Status<ExHash<P>, BlockHash<P>>>, xt: Bytes) {
+4 -7
View File
@@ -16,7 +16,6 @@
//! Substrate service components.
use std::fmt;
use std::sync::Arc;
use std::marker::PhantomData;
use std::ops::Deref;
@@ -39,7 +38,7 @@ use primitives::{Blake2Hasher};
pub type NetworkService<F> = network::Service<
<F as ServiceFactory>::Block,
<F as ServiceFactory>::NetworkProtocol,
<F as ServiceFactory>::ExtrinsicHash,
<<F as ServiceFactory>::Block as BlockT>::Hash,
>;
/// Code executor type for a factory.
@@ -121,16 +120,14 @@ impl<T: Serialize + DeserializeOwned + BuildStorage> RuntimeGenesis for T {}
pub trait ServiceFactory: 'static + Sized {
/// Block type.
type Block: BlockT;
/// Extrinsic hash type.
type ExtrinsicHash: ::std::hash::Hash + Eq + Copy + fmt::Debug + fmt::LowerHex + Serialize + DeserializeOwned + ::std::str::FromStr + Send + Sync + Default + 'static;
/// Network protocol extensions.
type NetworkProtocol: network::specialization::Specialization<Self::Block>;
/// Chain runtime.
type RuntimeDispatch: NativeExecutionDispatch + Send + Sync + 'static;
/// Extrinsic pool backend type for the full client.
type FullTransactionPoolApi: txpool::ChainApi<Hash = Self::ExtrinsicHash, Block = Self::Block> + Send + 'static;
type FullTransactionPoolApi: txpool::ChainApi<Hash = <Self::Block as BlockT>::Hash, Block = Self::Block> + Send + 'static;
/// Extrinsic pool backend type for the light client.
type LightTransactionPoolApi: txpool::ChainApi<Hash = Self::ExtrinsicHash, Block = Self::Block> + 'static;
type LightTransactionPoolApi: txpool::ChainApi<Hash = <Self::Block as BlockT>::Hash, Block = Self::Block> + 'static;
/// Genesis configuration for the runtime.
type Genesis: RuntimeGenesis;
/// Other configuration for service members.
@@ -202,7 +199,7 @@ pub trait Components: 'static {
type Executor: 'static + client::CallExecutor<FactoryBlock<Self::Factory>, Blake2Hasher> + Send + Sync;
/// Extrinsic pool type.
type TransactionPoolApi: 'static + txpool::ChainApi<
Hash = <Self::Factory as ServiceFactory>::ExtrinsicHash,
Hash = <<Self::Factory as ServiceFactory>::Block as BlockT>::Hash,
Block = FactoryBlock<Self::Factory>
>;
+166 -4
View File
@@ -59,8 +59,9 @@ pub mod chain_ops;
use std::io;
use std::net::SocketAddr;
use std::sync::Arc;
use std::collections::HashMap;
#[doc(hidden)]
pub use std::{ops::Deref, result::Result, sync::Arc};
use futures::prelude::*;
use parking_lot::Mutex;
use keystore::Store as Keystore;
@@ -68,7 +69,8 @@ use client::BlockchainEvents;
use runtime_primitives::traits::{Header, As};
use runtime_primitives::generic::BlockId;
use exit_future::Signal;
use tokio::runtime::TaskExecutor;
#[doc(hidden)]
pub use tokio::runtime::TaskExecutor;
use substrate_executor::NativeExecutor;
use codec::{Encode, Decode};
@@ -393,13 +395,13 @@ impl<C: Components> TransactionPoolAdapter<C> {
impl<C: Components> network::TransactionPool<ComponentExHash<C>, ComponentBlock<C>> for TransactionPoolAdapter<C> {
fn transactions(&self) -> Vec<(ComponentExHash<C>, ComponentExtrinsic<C>)> {
self.pool.ready(|pending| pending
self.pool.ready()
.map(|t| {
let hash = t.hash.clone();
let ex: ComponentExtrinsic<C> = t.data.clone();
(hash, ex)
})
.collect())
.collect()
}
fn import(&self, transaction: &ComponentExtrinsic<C>) -> Option<ComponentExHash<C>> {
@@ -438,3 +440,163 @@ impl<C: Components> network::TransactionPool<ComponentExHash<C>, ComponentBlock<
self.pool.on_broadcasted(propagations)
}
}
/// Creates a simple `Service` implementation.
/// This `Service` just holds an instance to a `service::Service` and implements `Deref`.
/// It also provides a `new` function that takes a `config` and a `TaskExecutor`.
#[macro_export]
macro_rules! construct_simple_service {
	(
		$name: ident
	) => {
		/// Newtype wrapper around the crate's `Service`, generated by
		/// `construct_simple_service!`.
		pub struct $name<C: $crate::Components> {
			inner: $crate::Service<C>,
		}

		impl<C: $crate::Components> $name<C> {
			/// Build the wrapped service from a full configuration and a task executor.
			fn new(
				// Hygiene fix: qualify through `$crate` so callers of the macro
				// do not have to import `FactoryFullConfiguration` themselves
				// (every other path in this expansion is already `$crate::`-qualified).
				config: $crate::FactoryFullConfiguration<C::Factory>,
				executor: $crate::TaskExecutor
			) -> $crate::Result<Self, $crate::Error> {
				Ok(
					Self {
						inner: $crate::Service::new(config, executor)?
					}
				)
			}
		}

		// Forward all `Service` methods through `Deref`.
		impl<C: $crate::Components> $crate::Deref for $name<C> {
			type Target = $crate::Service<C>;

			fn deref(&self) -> &Self::Target {
				&self.inner
			}
		}
	}
}
/// Constructs a service factory with the given name that implements the `ServiceFactory` trait.
/// The required parameters are required to be given in the exact order. Some parameters are followed
/// by `{}` blocks. These blocks are required and used to initialize the given parameter.
/// In these block it is required to write a closure that takes the same number of arguments,
/// the corresponding function in the `ServiceFactory` trait provides.
///
/// # Example
///
/// ```nocompile
/// construct_service_factory! {
/// struct Factory {
/// // Declare the block type
/// Block = Block,
/// // Declare the network protocol and give an initializer.
/// NetworkProtocol = NodeProtocol { |config| Ok(NodeProtocol::new()) },
/// RuntimeDispatch = node_executor::Executor,
/// FullTransactionPoolApi = transaction_pool::ChainApi<FullBackend<Self>, FullExecutor<Self>, Block>
/// { |config, client| Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) },
/// LightTransactionPoolApi = transaction_pool::ChainApi<LightBackend<Self>, LightExecutor<Self>, Block>
/// { |config, client| Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) },
/// Genesis = GenesisConfig,
/// Configuration = (),
/// FullService = Service<FullComponents<Self>>
/// { |config, executor| Service::<FullComponents<Factory>>::new(config, executor) },
/// LightService = Service<LightComponents<Self>>
/// { |config, executor| Service::<LightComponents<Factory>>::new(config, executor) },
/// // Declare the import queue. The import queue is special as it takes two initializers.
/// // The first one is for the initializing the full import queue and the second for the
/// // light import queue.
/// ImportQueue = BasicQueue<Block, NoneVerifier>
/// { |_, _| Ok(BasicQueue::new(Arc::new(NoneVerifier {}))) }
/// { |_, _| Ok(BasicQueue::new(Arc::new(NoneVerifier {}))) },
/// }
/// }
/// ```
#[macro_export]
macro_rules! construct_service_factory {
	(
		$(#[$attr:meta])*
		struct $name:ident {
			Block = $block:ty,
			NetworkProtocol = $protocol:ty { $( $protocol_init:tt )* },
			RuntimeDispatch = $dispatch:ty,
			FullTransactionPoolApi = $full_transaction:ty { $( $full_transaction_init:tt )* },
			LightTransactionPoolApi = $light_transaction:ty { $( $light_transaction_init:tt )* },
			Genesis = $genesis:ty,
			Configuration = $config:ty,
			FullService = $full_service:ty { $( $full_service_init:tt )* },
			LightService = $light_service:ty { $( $light_service_init:tt )* },
			ImportQueue = $import_queue:ty
				{ $( $full_import_queue_init:tt )* }
				{ $( $light_import_queue_init:tt )* },
		}
	) => {
		$( #[$attr] )*
		pub struct $name {}

		#[allow(unused_variables)]
		impl $crate::ServiceFactory for $name {
			type Block = $block;
			type NetworkProtocol = $protocol;
			type RuntimeDispatch = $dispatch;
			type FullTransactionPoolApi = $full_transaction;
			type LightTransactionPoolApi = $light_transaction;
			type Genesis = $genesis;
			type Configuration = $config;
			type FullService = $full_service;
			type LightService = $light_service;
			type ImportQueue = $import_queue;

			// Each builder simply invokes the caller-supplied initializer
			// closure with the arguments the `ServiceFactory` trait provides.
			fn build_full_transaction_pool(
				config: $crate::TransactionPoolOptions,
				client: $crate::Arc<$crate::FullClient<Self>>
			) -> $crate::Result<$crate::TransactionPool<Self::FullTransactionPoolApi>, $crate::Error>
			{
				( $( $full_transaction_init )* ) (config, client)
			}

			fn build_light_transaction_pool(
				config: $crate::TransactionPoolOptions,
				client: $crate::Arc<$crate::LightClient<Self>>
			) -> $crate::Result<$crate::TransactionPool<Self::LightTransactionPoolApi>, $crate::Error>
			{
				( $( $light_transaction_init )* ) (config, client)
			}

			fn build_network_protocol(config: &$crate::FactoryFullConfiguration<Self>)
				-> $crate::Result<Self::NetworkProtocol, $crate::Error>
			{
				( $( $protocol_init )* ) (config)
			}

			fn build_full_import_queue(
				config: &$crate::FactoryFullConfiguration<Self>,
				client: $crate::Arc<$crate::FullClient<Self>>,
			) -> $crate::Result<Self::ImportQueue, $crate::Error> {
				( $( $full_import_queue_init )* ) (config, client)
			}

			fn build_light_import_queue(
				// Hygiene fix: `FactoryFullConfiguration`, `Arc` and `Result`
				// were previously unqualified here (unlike the sibling methods),
				// forcing macro callers to have those names imported. Qualify
				// them through `$crate` for consistency.
				config: &$crate::FactoryFullConfiguration<Self>,
				client: $crate::Arc<$crate::LightClient<Self>>,
			) -> $crate::Result<Self::ImportQueue, $crate::Error> {
				( $( $light_import_queue_init )* ) (config, client)
			}

			fn new_light(
				config: $crate::FactoryFullConfiguration<Self>,
				executor: $crate::TaskExecutor
			) -> $crate::Result<Self::LightService, $crate::Error>
			{
				( $( $light_service_init )* ) (config, executor)
			}

			fn new_full(
				config: $crate::FactoryFullConfiguration<Self>,
				executor: $crate::TaskExecutor
			) -> $crate::Result<Self::FullService, $crate::Error>  // hygiene fix: was bare `Result`
			{
				( $( $full_service_init )* ) (config, executor)
			}
		}
	}
}
+4 -4
View File
@@ -185,7 +185,7 @@ pub fn connectivity<F: ServiceFactory>(spec: FactoryChainSpec<F>) {
{
let mut network = TestNet::<F>::new(&temp, spec.clone(), NUM_NODES, 0, vec![], 30400);
info!("Checking star topology");
let first_address = network.full_nodes[0].1.network().node_id().unwrap();
let first_address = network.full_nodes[0].1.network().node_id().expect("No node address");
for (_, service) in network.full_nodes.iter().skip(1) {
service.network().add_reserved_peer(first_address.clone()).expect("Error adding reserved peer");
}
@@ -200,10 +200,10 @@ pub fn connectivity<F: ServiceFactory>(spec: FactoryChainSpec<F>) {
{
let mut network = TestNet::<F>::new(&temp, spec, NUM_NODES as u32, 0, vec![], 30400);
info!("Checking linked topology");
let mut address = network.full_nodes[0].1.network().node_id().unwrap();
let mut address = network.full_nodes[0].1.network().node_id().expect("No node address");
for (_, service) in network.full_nodes.iter().skip(1) {
service.network().add_reserved_peer(address.clone()).expect("Error adding reserved peer");
address = service.network().node_id().unwrap();
address = service.network().node_id().expect("No node address");
}
network.run_until_all_full(|_index, service| {
service.network().status().num_peers == NUM_NODES as usize - 1
@@ -247,7 +247,7 @@ where
let best_block = BlockId::number(first_service.client().info().unwrap().chain.best_number);
first_service.transaction_pool().submit_one(&best_block, extrinsic_factory(&first_service)).unwrap();
network.run_until_all_full(|_index, service|
service.transaction_pool().all(usize::max_value()).len() == 1
service.transaction_pool().ready().count() == 1
);
}
-2
View File
@@ -5,7 +5,6 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
parity-codec = { version = "2.1", default-features = false }
parity-codec-derive = { version = "2.1", default-features = false }
sr-std = { path = "../sr-std", default-features = false }
sr-primitives = { path = "../sr-primitives", default-features = false }
sr-version = { path = "../sr-version", default-features = false }
@@ -15,7 +14,6 @@ default = ["std"]
std = [
"sr-std/std",
"parity-codec/std",
"parity-codec-derive/std",
"sr-primitives/std",
"sr-version/std",
]
+1 -14
View File
@@ -22,8 +22,6 @@ extern crate sr_std as rstd;
extern crate sr_primitives as primitives;
#[doc(hidden)]
pub extern crate parity_codec as codec;
#[macro_use]
extern crate parity_codec_derive;
extern crate sr_version as runtime_version;
#[doc(hidden)]
@@ -429,17 +427,6 @@ macro_rules! decl_apis {
};
}
//TODO: Move into runtime!
#[derive(Encode)]
#[cfg_attr(feature = "std", derive(Debug, Decode))]
pub enum BlockBuilderError {
#[cfg(not(feature = "std"))]
Generic(&'static str),
#[cfg(feature = "std")]
Generic(String),
TimestampInFuture(u64),
}
decl_apis! {
/// The `Core` api trait that is mandantory for each runtime.
pub trait Core<Block: BlockT, AuthorityId> {
@@ -482,7 +469,7 @@ decl_apis! {
/// Generate inherent extrinsics.
fn inherent_extrinsics<InherentExtrinsic, UncheckedExtrinsic>(inherent: InherentExtrinsic) -> Vec<UncheckedExtrinsic>;
/// Check that the inherents are valid.
fn check_inherents<InherentData>(block: Block, data: InherentData) -> Result<(), BlockBuilderError>;
fn check_inherents<InherentData, Error>(block: Block, data: InherentData) -> Result<(), Error>;
/// Generate a random seed.
fn random_seed() -> <Block as BlockT>::Hash;
}
+55 -1
View File
@@ -47,6 +47,12 @@ pub fn storage(key: &[u8]) -> Option<Vec<u8>> {
.expect("storage cannot be called outside of an Externalities-provided environment.")
}
/// Get `key` from child storage and return a `Vec`, empty if there's a problem.
pub fn child_storage(storage_key: &[u8], key: &[u8]) -> Option<Vec<u8>> {
	let fetched = ext::with(|ext| {
		// Copy out of the externalities' borrow into an owned buffer.
		ext.child_storage(storage_key, key).map(|bytes| bytes.to_vec())
	});
	fetched.expect("storage cannot be called outside of an Externalities-provided environment.")
}
/// Get `key` from storage, placing the value into `value_out` (as much of it as possible) and return
/// the number of bytes that the entry in storage had beyond the offset or None if the storage entry
/// doesn't exist at all. Note that if the buffer is smaller than the storage entry length, the returned
@@ -55,7 +61,20 @@ pub fn read_storage(key: &[u8], value_out: &mut [u8], value_offset: usize) -> Op
ext::with(|ext| ext.storage(key).map(|value| {
let value = &value[value_offset..];
let written = ::std::cmp::min(value.len(), value_out.len());
value_out[0..written].copy_from_slice(&value[0..written]);
value_out[..written].copy_from_slice(&value[..written]);
value.len()
})).expect("read_storage cannot be called outside of an Externalities-provided environment.")
}
/// Get `key` from child storage, placing the value into `value_out` (as much of it as possible) and return
/// the number of bytes that the entry in storage had beyond the offset or None if the storage entry
/// doesn't exist at all. Note that if the buffer is smaller than the storage entry length, the returned
/// number of bytes is not equal to the number of bytes written to the `value_out`.
pub fn read_child_storage(storage_key: &[u8], key: &[u8], value_out: &mut [u8], value_offset: usize) -> Option<usize> {
	ext::with(|ext| ext.child_storage(storage_key, key).map(|value| {
		// NOTE(review): like `read_storage` above, this panics if
		// `value_offset` exceeds the stored value's length — confirm callers
		// guarantee that.
		let value = &value[value_offset..];
		// Copy as much of the remainder as fits into the caller's buffer.
		let written = ::std::cmp::min(value.len(), value_out.len());
		value_out[..written].copy_from_slice(&value[..written]);
		// Return the full remaining length, which may exceed what was written.
		value.len()
		// Fixed expect message: it previously said "read_storage", copied from
		// the sibling function, which would mislead anyone reading the panic.
	})).expect("read_child_storage cannot be called outside of an Externalities-provided environment.")
}
@@ -67,6 +86,13 @@ pub fn set_storage(key: &[u8], value: &[u8]) {
);
}
/// Set the child storage of a key to some value.
pub fn set_child_storage(storage_key: &[u8], key: &[u8], value: &[u8]) {
	ext::with(|ext| {
		// The externalities take ownership of the data, hence the copies.
		ext.set_child_storage(storage_key.to_vec(), key.to_vec(), value.to_vec());
	});
}
/// Clear the storage of a key.
pub fn clear_storage(key: &[u8]) {
ext::with(|ext|
@@ -74,6 +100,13 @@ pub fn clear_storage(key: &[u8]) {
);
}
/// Clear the child storage entry of `key` in the child trie identified by
/// `storage_key`. (The previous doc comment was copied from `clear_storage`.)
pub fn clear_child_storage(storage_key: &[u8], key: &[u8]) {
ext::with(|ext|
ext.clear_child_storage(storage_key, key)
);
}
/// Check whether a given `key` exists in storage.
pub fn exists_storage(key: &[u8]) -> bool {
ext::with(|ext|
@@ -81,6 +114,13 @@ pub fn exists_storage(key: &[u8]) -> bool {
).unwrap_or(false)
}
/// Check whether a given `key` exists in the child trie identified by
/// `storage_key`; falls back to `false` when no externalities are available.
pub fn exists_child_storage(storage_key: &[u8], key: &[u8]) -> bool {
ext::with(|ext|
ext.exists_child_storage(storage_key, key)
).unwrap_or(false)
}
/// Clear the storage entries with a key that starts with the given prefix.
pub fn clear_prefix(prefix: &[u8]) {
ext::with(|ext|
@@ -88,6 +128,13 @@ pub fn clear_prefix(prefix: &[u8]) {
);
}
/// Clear an entire child storage.
pub fn kill_child_storage(storage_key: &[u8]) {
	ext::with(|ext| {
		// Remove every entry of the child trie identified by `storage_key`.
		ext.kill_child_storage(storage_key);
	});
}
/// The current relay chain identifier.
pub fn chain_id() -> u64 {
ext::with(|ext|
@@ -102,6 +149,13 @@ pub fn storage_root() -> H256 {
).unwrap_or(H256::new())
}
/// "Commit" all existing operations and compute the resultant child storage root.
pub fn child_storage_root(storage_key: &[u8]) -> Option<Vec<u8>> {
	// `ext::with` yields `None` outside an externalities environment; fold
	// that case into the `None` root (same as the original `unwrap_or(None)`).
	match ext::with(|ext| ext.child_storage_root(storage_key)) {
		Some(root) => root,
		None => None,
	}
}
/// "Commit" all existing operations and get the resultant storage change root.
pub fn storage_changes_root(block: u64) -> Option<H256> {
ext::with(|ext|
+98 -1
View File
@@ -54,16 +54,24 @@ pub extern fn oom(_: ::core::alloc::Layout) -> ! {
}
extern "C" {
fn ext_free(addr: *mut u8);
fn ext_print_utf8(utf8_data: *const u8, utf8_len: u32);
fn ext_print_hex(data: *const u8, len: u32);
fn ext_print_num(value: u64);
fn ext_set_storage(key_data: *const u8, key_len: u32, value_data: *const u8, value_len: u32);
fn ext_set_child_storage(storage_key_data: *const u8, storage_key_len: u32, key_data: *const u8, key_len: u32, value_data: *const u8, value_len: u32);
fn ext_clear_storage(key_data: *const u8, key_len: u32);
fn ext_clear_child_storage(storage_key_data: *const u8, storage_key_len: u32, key_data: *const u8, key_len: u32);
fn ext_exists_storage(key_data: *const u8, key_len: u32) -> u32;
fn ext_exists_child_storage(storage_key_data: *const u8, storage_key_len: u32, key_data: *const u8, key_len: u32) -> u32;
fn ext_clear_prefix(prefix_data: *const u8, prefix_len: u32);
fn ext_kill_child_storage(storage_key_data: *const u8, storage_key_len: u32);
fn ext_get_allocated_storage(key_data: *const u8, key_len: u32, written_out: *mut u32) -> *mut u8;
fn ext_get_allocated_child_storage(storage_key_data: *const u8, storage_key_len: u32, key_data: *const u8, key_len: u32, written_out: *mut u32) -> *mut u8;
fn ext_get_storage_into(key_data: *const u8, key_len: u32, value_data: *mut u8, value_len: u32, value_offset: u32) -> u32;
fn ext_get_child_storage_into(storage_key_data: *const u8, storage_key_len: u32, key_data: *const u8, key_len: u32, value_data: *mut u8, value_len: u32, value_offset: u32) -> u32;
fn ext_storage_root(result: *mut u8);
fn ext_child_storage_root(storage_key_data: *const u8, storage_key_len: u32, written_out: *mut u32) -> *mut u8;
fn ext_storage_changes_root(block: u64, result: *mut u8) -> u32;
fn ext_blake2_256_enumerated_trie_root(values_data: *const u8, lens_data: *const u32, lens_len: u32, result: *mut u8);
fn ext_chain_id() -> u64;
@@ -104,7 +112,24 @@ pub fn storage(key: &[u8]) -> Option<Vec<u8>> {
if length == u32::max_value() {
None
} else {
Some(Vec::from_raw_parts(ptr, length as usize, length as usize))
let ret = slice::from_raw_parts(ptr, length as usize).to_vec();
ext_free(ptr);
Some(ret)
}
}
}
/// Get `key` from child storage and return a `Vec`, empty if there's a problem.
pub fn child_storage(storage_key: &[u8], key: &[u8]) -> Option<Vec<u8>> {
	// Written by the host with the value length, or `u32::max_value()` if absent.
	let mut length: u32 = 0;
	unsafe {
		// Host allocates a buffer for the value; we must free it via `ext_free`.
		let ptr = ext_get_allocated_child_storage(storage_key.as_ptr(), storage_key.len() as u32, key.as_ptr(), key.len() as u32, &mut length);
		if length == u32::max_value() {
			// Sentinel: key not present in the child trie.
			None
		} else {
			// Copy out of the host-allocated buffer, then release it to avoid a leak.
			let ret = slice::from_raw_parts(ptr, length as usize).to_vec();
			ext_free(ptr);
			Some(ret)
		}
	}
}
@@ -119,6 +144,17 @@ pub fn set_storage(key: &[u8], value: &[u8]) {
}
}
/// Set the child storage of some particular key to Some value.
///
/// `storage_key` selects the child trie; `key`/`value` are the entry to set
/// within it.
pub fn set_child_storage(storage_key: &[u8], key: &[u8], value: &[u8]) {
	unsafe {
		ext_set_child_storage(
			// BUGFIX: the length passed alongside `storage_key.as_ptr()` must be
			// `storage_key.len()`, not `key.len()` — otherwise the host reads a
			// wrong-sized (and potentially out-of-bounds) child-trie key. This now
			// matches the (ptr, len) pairing used by `clear_child_storage`.
			storage_key.as_ptr(), storage_key.len() as u32,
			key.as_ptr(), key.len() as u32,
			value.as_ptr(), value.len() as u32
		);
	}
}
/// Clear the storage of some particular key.
pub fn clear_storage(key: &[u8]) {
unsafe {
@@ -128,6 +164,16 @@ pub fn clear_storage(key: &[u8]) {
}
}
/// Clear the child storage of some particular key.
///
/// `storage_key` selects the child trie; `key` is the entry removed from it.
pub fn clear_child_storage(storage_key: &[u8], key: &[u8]) {
	unsafe {
		ext_clear_child_storage(
			storage_key.as_ptr(), storage_key.len() as u32,
			key.as_ptr(), key.len() as u32
		);
	}
}
/// Determine whether a particular key exists in storage.
pub fn exists_storage(key: &[u8]) -> bool {
unsafe {
@@ -137,6 +183,16 @@ pub fn exists_storage(key: &[u8]) -> bool {
}
}
/// Determine whether a particular key exists in child storage.
///
/// Returns `true` iff `key` is present in the child trie named by `storage_key`.
pub fn exists_child_storage(storage_key: &[u8], key: &[u8]) -> bool {
	unsafe {
		// Host returns a non-zero u32 for "exists".
		ext_exists_child_storage(
			storage_key.as_ptr(), storage_key.len() as u32,
			key.as_ptr(), key.len() as u32
		) != 0
	}
}
/// Clear the storage entries key of which starts with the given prefix.
pub fn clear_prefix(prefix: &[u8]) {
unsafe {
@@ -147,6 +203,16 @@ pub fn clear_prefix(prefix: &[u8]) {
}
}
/// Clear an entire child storage.
///
/// Removes every entry of the child trie identified by `storage_key`.
pub fn kill_child_storage(storage_key: &[u8]) {
	unsafe {
		ext_kill_child_storage(storage_key.as_ptr(), storage_key.len() as u32);
	}
}
/// Get `key` from storage, placing the value into `value_out` (as much as possible) and return
/// the number of bytes that the key in storage was beyond the offset.
pub fn read_storage(key: &[u8], value_out: &mut [u8], value_offset: usize) -> Option<usize> {
@@ -162,6 +228,22 @@ pub fn read_storage(key: &[u8], value_out: &mut [u8], value_offset: usize) -> Op
}
}
/// Get `key` from child storage, placing the value into `value_out` (as much as possible) and return
/// the number of bytes that the key in storage was beyond the offset.
///
/// Returns `None` when the key is absent from the child trie.
pub fn read_child_storage(storage_key: &[u8], key: &[u8], value_out: &mut [u8], value_offset: usize) -> Option<usize> {
	unsafe {
		match ext_get_child_storage_into(
			storage_key.as_ptr(), storage_key.len() as u32,
			key.as_ptr(), key.len() as u32,
			value_out.as_mut_ptr(), value_out.len() as u32,
			value_offset as u32
		) {
			// `u32::max_value()` is the host's sentinel for "key not found".
			none if none == u32::max_value() => None,
			length => Some(length as usize),
		}
	}
}
/// The current storage's root.
pub fn storage_root() -> [u8; 32] {
let mut result: [u8; 32] = Default::default();
@@ -171,6 +253,21 @@ pub fn storage_root() -> [u8; 32] {
result
}
/// "Commit" all existing operations and compute the resultant child storage root.
///
/// Returns `None` when the host reports no root for `storage_key`.
pub fn child_storage_root(storage_key: &[u8]) -> Option<Vec<u8>> {
	// Written by the host with the root length, or `u32::max_value()` if absent.
	let mut length: u32 = 0;
	unsafe {
		// Host allocates the result buffer; it must be released with `ext_free`.
		let ptr = ext_child_storage_root(storage_key.as_ptr(), storage_key.len() as u32, &mut length);
		if length == u32::max_value() {
			None
		} else {
			// Copy out of the host-allocated buffer, then free it to avoid a leak.
			let ret = slice::from_raw_parts(ptr, length as usize).to_vec();
			ext_free(ptr);
			Some(ret)
		}
	}
}
/// The current storage' changes root.
pub fn storage_changes_root(block: u64) -> Option<[u8; 32]> {
let mut result: [u8; 32] = Default::default();
-2
View File
@@ -13,7 +13,6 @@ parity-codec-derive = { version = "2.1", default-features = false }
substrate-primitives = { path = "../primitives", default-features = false }
sr-std = { path = "../sr-std", default-features = false }
sr-io = { path = "../sr-io", default-features = false }
sr-version = { path = "../sr-version", default-features = false }
log = {version = "0.4", optional = true }
[dev-dependencies]
@@ -28,7 +27,6 @@ std = [
"log",
"sr-std/std",
"sr-io/std",
"sr-version/std",
"parity-codec/std",
"substrate-primitives/std",
]
@@ -72,7 +72,7 @@ pub struct Block<Header, Extrinsic> {
impl<Header, Extrinsic> traits::Block for Block<Header, Extrinsic>
where
Header: HeaderT,
Extrinsic: Member + Codec,
Extrinsic: Member + Codec + traits::Extrinsic,
{
type Extrinsic = Extrinsic;
type Header = Header;
@@ -102,4 +102,4 @@ pub struct SignedBlock<H, E> {
pub block: Block<H, E>,
/// Block justification.
pub justification: Justification,
}
}
@@ -113,17 +113,11 @@ impl<Hash: Codec + Member, AuthorityId: Codec + Member> traits::DigestItem for D
type AuthorityId = AuthorityId;
fn as_authorities_change(&self) -> Option<&[Self::AuthorityId]> {
match *self {
DigestItem::AuthoritiesChange(ref authorities) => Some(authorities),
_ => None,
}
self.dref().as_authorities_change()
}
fn as_changes_trie_root(&self) -> Option<&Hash> {
match *self {
DigestItem::ChangesTrieRoot(ref changes_trie_root) => Some(changes_trie_root),
_ => None,
}
self.dref().as_changes_trie_root()
}
}
@@ -150,6 +144,22 @@ impl<Hash: Decode, AuthorityId: Decode> Decode for DigestItem<Hash, AuthorityId>
}
}
impl<'a, Hash: Codec + Member, AuthorityId: Codec + Member> DigestItemRef<'a, Hash, AuthorityId> {
	/// Returns `Some` of the borrowed authority set if this is an
	/// `AuthoritiesChange` item, `None` for every other variant.
	pub fn as_authorities_change(&self) -> Option<&'a [AuthorityId]> {
		match *self {
			DigestItemRef::AuthoritiesChange(ref authorities) => Some(authorities),
			_ => None,
		}
	}

	/// Returns `Some` of the borrowed changes-trie root if this is a
	/// `ChangesTrieRoot` item, `None` for every other variant.
	pub fn as_changes_trie_root(&self) -> Option<&'a Hash> {
		match *self {
			DigestItemRef::ChangesTrieRoot(ref changes_trie_root) => Some(changes_trie_root),
			_ => None,
		}
	}
}
impl<'a, Hash: Encode, AuthorityId: Encode> Encode for DigestItemRef<'a, Hash, AuthorityId> {
fn encode(&self) -> Vec<u8> {
let mut v = Vec::new();
@@ -22,7 +22,7 @@ use std::fmt;
use rstd::prelude::*;
use codec::{Decode, Encode, Input};
use traits::{self, Member, SimpleArithmetic, MaybeDisplay, CurrentHeight, BlockNumberToHash, Lookup,
Checkable};
Checkable, Extrinsic};
use super::{CheckedExtrinsic, Era};
const TRANSACTION_VERSION: u8 = 1;
@@ -56,10 +56,11 @@ impl<Address, Index, Call, Signature> UncheckedMortalExtrinsic<Address, Index, C
function,
}
}
}
/// `true` if there is a signature.
pub fn is_signed(&self) -> bool {
self.signature.is_some()
impl<Address, Index, Call, Signature> Extrinsic for UncheckedMortalExtrinsic<Address, Index, Call, Signature> {
	/// Signedness is always known for this type: `Some(true)` iff a signature
	/// payload is present.
	fn is_signed(&self) -> Option<bool> {
		Some(self.signature.is_some())
	}
}
@@ -221,49 +222,49 @@ mod tests {
#[test]
fn unsigned_check_should_work() {
let ux = Ex::new_unsigned(DUMMY_FUNCTION);
assert!(!ux.is_signed());
assert!(!ux.is_signed().unwrap_or(false));
assert!(<Ex as Checkable<TestContext>>::check(ux, &TestContext).is_ok());
}
#[test]
fn badly_signed_check_should_fail() {
let ux = Ex::new_signed(0, DUMMY_FUNCTION, DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, vec![0u8]), Era::immortal());
assert!(ux.is_signed());
assert!(ux.is_signed().unwrap_or(false));
assert_eq!(<Ex as Checkable<TestContext>>::check(ux, &TestContext), Err("bad signature in extrinsic"));
}
#[test]
fn immortal_signed_check_should_work() {
let ux = Ex::new_signed(0, DUMMY_FUNCTION, DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, (DUMMY_ACCOUNTID, DUMMY_FUNCTION, Era::immortal(), 0u64).encode()), Era::immortal());
assert!(ux.is_signed());
assert!(ux.is_signed().unwrap_or(false));
assert_eq!(<Ex as Checkable<TestContext>>::check(ux, &TestContext), Ok(CEx { signed: Some((DUMMY_ACCOUNTID, 0)), function: DUMMY_FUNCTION }));
}
#[test]
fn mortal_signed_check_should_work() {
let ux = Ex::new_signed(0, DUMMY_FUNCTION, DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, (DUMMY_ACCOUNTID, DUMMY_FUNCTION, Era::mortal(32, 42), 42u64).encode()), Era::mortal(32, 42));
assert!(ux.is_signed());
assert!(ux.is_signed().unwrap_or(false));
assert_eq!(<Ex as Checkable<TestContext>>::check(ux, &TestContext), Ok(CEx { signed: Some((DUMMY_ACCOUNTID, 0)), function: DUMMY_FUNCTION }));
}
#[test]
fn later_mortal_signed_check_should_work() {
let ux = Ex::new_signed(0, DUMMY_FUNCTION, DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, (DUMMY_ACCOUNTID, DUMMY_FUNCTION, Era::mortal(32, 11), 11u64).encode()), Era::mortal(32, 11));
assert!(ux.is_signed());
assert!(ux.is_signed().unwrap_or(false));
assert_eq!(<Ex as Checkable<TestContext>>::check(ux, &TestContext), Ok(CEx { signed: Some((DUMMY_ACCOUNTID, 0)), function: DUMMY_FUNCTION }));
}
#[test]
fn too_late_mortal_signed_check_should_fail() {
let ux = Ex::new_signed(0, DUMMY_FUNCTION, DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, (DUMMY_ACCOUNTID, DUMMY_FUNCTION, Era::mortal(32, 10), 10u64).encode()), Era::mortal(32, 10));
assert!(ux.is_signed());
assert!(ux.is_signed().unwrap_or(false));
assert_eq!(<Ex as Checkable<TestContext>>::check(ux, &TestContext), Err("bad signature in extrinsic"));
}
#[test]
fn too_early_mortal_signed_check_should_fail() {
let ux = Ex::new_signed(0, DUMMY_FUNCTION, DUMMY_ACCOUNTID, TestSig(DUMMY_ACCOUNTID, (DUMMY_ACCOUNTID, DUMMY_FUNCTION, Era::mortal(32, 43), 43u64).encode()), Era::mortal(32, 43));
assert!(ux.is_signed());
assert!(ux.is_signed().unwrap_or(false));
assert_eq!(<Ex as Checkable<TestContext>>::check(ux, &TestContext), Err("bad signature in extrinsic"));
}
+73 -35
View File
@@ -37,7 +37,6 @@ extern crate num_traits;
extern crate integer_sqrt;
extern crate sr_std as rstd;
extern crate sr_io as runtime_io;
extern crate sr_version as runtime_version;
#[doc(hidden)]
pub extern crate parity_codec as codec;
extern crate substrate_primitives;
@@ -65,6 +64,12 @@ pub type Justification = Vec<u8>;
use traits::{Verify, Lazy};
/// A String that is a `&'static str` on `no_std` and a `String` on `std`.
#[cfg(not(feature = "std"))]
pub type RuntimeString = &'static str;
#[cfg(feature = "std")]
pub type RuntimeString = ::std::borrow::Cow<'static, str>;
#[cfg(feature = "std")]
pub use serde::{Serialize, de::DeserializeOwned};
@@ -264,19 +269,32 @@ pub fn verify_encoded_lazy<V: Verify, T: codec::Encode>(sig: &V, item: &T, signe
#[macro_export]
macro_rules! __impl_outer_config_types {
($concrete:ident $config:ident $snake:ident $($rest:ident)*) => {
(
$concrete:ident $config:ident $snake:ident < $ignore:ident > $( $rest:tt )*
) => {
#[cfg(any(feature = "std", test))]
pub type $config = $snake::GenesisConfig<$concrete>;
__impl_outer_config_types! {$concrete $($rest)*}
};
(
$concrete:ident $config:ident $snake:ident $( $rest:tt )*
) => {
#[cfg(any(feature = "std", test))]
pub type $config = $snake::GenesisConfig;
__impl_outer_config_types! {$concrete $($rest)*}
};
($concrete:ident) => ()
}
#[macro_export]
/// Implement the output "meta" module configuration struct.
macro_rules! impl_outer_config {
( pub struct $main:ident for $concrete:ident { $( $config:ident => $snake:ident, )* } ) => {
__impl_outer_config_types! { $concrete $( $config $snake )* }
(
pub struct $main:ident for $concrete:ident {
$( $config:ident => $snake:ident $( < $generic:ident > )*, )*
}
) => {
__impl_outer_config_types! { $concrete $( $config $snake $( < $generic > )* )* }
#[cfg(any(feature = "std", test))]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
@@ -321,7 +339,7 @@ macro_rules! impl_outer_log {
(
$(#[$attr:meta])*
pub enum $name:ident ($internal:ident: DigestItem<$( $genarg:ty ),*>) for $trait:ident {
$( $module:ident($( $item:ident ),*) ),*
$( $module:ident( $( $sitem:ident ),* ) ),*
}
) => {
/// Wrapper for all possible log entries for the `$trait` runtime. Provides binary-compatible
@@ -338,7 +356,7 @@ macro_rules! impl_outer_log {
#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))]
$(#[$attr])*
#[allow(non_camel_case_types)]
enum $internal {
pub enum InternalLog {
$(
$module($module::Log<$trait>),
)*
@@ -352,14 +370,27 @@ macro_rules! impl_outer_log {
fn dref<'a>(&'a self) -> Option<$crate::generic::DigestItemRef<'a, $($genarg),*>> {
match self.0 {
$($(
$internal::$module($module::RawLog::$item(ref v)) =>
Some($crate::generic::DigestItemRef::$item(v)),
$internal::$module($module::RawLog::$sitem(ref v)) =>
Some($crate::generic::DigestItemRef::$sitem(v)),
)*)*
_ => None,
}
}
}
impl $crate::traits::DigestItem for $name {
type Hash = <$crate::generic::DigestItem<$($genarg),*> as $crate::traits::DigestItem>::Hash;
type AuthorityId = <$crate::generic::DigestItem<$($genarg),*> as $crate::traits::DigestItem>::AuthorityId;
fn as_authorities_change(&self) -> Option<&[Self::AuthorityId]> {
self.dref().and_then(|dref| dref.as_authorities_change())
}
fn as_changes_trie_root(&self) -> Option<&Self::Hash> {
self.dref().and_then(|dref| dref.as_changes_trie_root())
}
}
impl From<$crate::generic::DigestItem<$($genarg),*>> for $name {
/// Converts `generic::DigestItem` into `$name`. If `generic::DigestItem` represents
/// a system item which is supported by the runtime, it is returned.
@@ -370,8 +401,8 @@ macro_rules! impl_outer_log {
fn from(gen: $crate::generic::DigestItem<$($genarg),*>) -> Self {
match gen {
$($(
$crate::generic::DigestItem::$item(value) =>
$name($internal::$module($module::RawLog::$item(value))),
$crate::generic::DigestItem::$sitem(value) =>
$name($internal::$module($module::RawLog::$sitem(value))),
)*)*
_ => gen.as_other()
.and_then(|value| $crate::codec::Decode::decode(&mut &value[..]))
@@ -412,10 +443,10 @@ macro_rules! impl_outer_log {
}
}
impl From<$module::Log<$trait>> for $internal {
impl From<$module::Log<$trait>> for InternalLog {
/// Converts single module log item into `$internal`.
fn from(x: $module::Log<$trait>) -> Self {
$internal::$module(x)
InternalLog::$module(x)
}
}
)*
@@ -426,6 +457,7 @@ macro_rules! impl_outer_log {
mod tests {
use substrate_primitives::hash::H256;
use codec::{Encode as EncodeHidden, Decode as DecodeHidden};
use traits::DigestItem;
pub trait RuntimeT {
type AuthorityId;
@@ -437,31 +469,31 @@ mod tests {
type AuthorityId = u64;
}
mod a {
use super::RuntimeT;
pub type Log<R> = RawLog<<R as RuntimeT>::AuthorityId>;
#[derive(Serialize, Deserialize, Debug, Encode, Decode, PartialEq, Eq, Clone)]
pub enum RawLog<AuthorityId> { A1(AuthorityId), AuthoritiesChange(Vec<AuthorityId>), A3(AuthorityId) }
}
mod b {
use super::RuntimeT;
pub type Log<R> = RawLog<<R as RuntimeT>::AuthorityId>;
#[derive(Serialize, Deserialize, Debug, Encode, Decode, PartialEq, Eq, Clone)]
pub enum RawLog<AuthorityId> { B1(AuthorityId), B2(AuthorityId) }
}
// TODO try to avoid redundant brackets: a(AuthoritiesChange), b
impl_outer_log! {
pub enum Log(InternalLog: DigestItem<H256, u64>) for Runtime {
a(AuthoritiesChange), b()
}
}
#[test]
fn impl_outer_log_works() {
mod a {
use super::RuntimeT;
pub type Log<R> = RawLog<<R as RuntimeT>::AuthorityId>;
#[derive(Serialize, Deserialize, Debug, Encode, Decode, PartialEq, Eq, Clone)]
pub enum RawLog<AuthorityId> { A1(AuthorityId), AuthoritiesChange(Vec<AuthorityId>), A3(AuthorityId) }
}
mod b {
use super::RuntimeT;
pub type Log<R> = RawLog<<R as RuntimeT>::AuthorityId>;
#[derive(Serialize, Deserialize, Debug, Encode, Decode, PartialEq, Eq, Clone)]
pub enum RawLog<AuthorityId> { B1(AuthorityId), B2(AuthorityId) }
}
// TODO try to avoid redundant brackets: a(AuthoritiesChange), b
impl_outer_log! {
pub enum Log(InternalLog: DigestItem<H256, u64>) for Runtime {
a(AuthoritiesChange), b()
}
}
// encode/decode regular item
let b1: Log = b::RawLog::B1::<u64>(777).into();
let encoded_b1 = b1.encode();
@@ -487,5 +519,11 @@ mod tests {
super::generic::DigestItem::AuthoritiesChange::<H256, u64>(authorities) => assert_eq!(authorities, vec![100, 200, 300]),
_ => panic!("unexpected generic_auth_change: {:?}", generic_auth_change),
}
// check that as-style methods are working with system items
assert!(auth_change.as_authorities_change().is_some());
// check that as-style methods are not working with regular items
assert!(b1.as_authorities_change().is_none());
}
}
+30 -2
View File
@@ -17,7 +17,7 @@
//! Testing utilities.
use serde::{Serialize, de::DeserializeOwned};
use std::fmt::Debug;
use std::{fmt::Debug, ops::Deref};
use codec::Codec;
use traits::{self, Checkable, Applyable, BlakeTwo256};
use generic::DigestItem as GenDigestItem;
@@ -93,13 +93,36 @@ impl traits::Header for Header {
}
}
/// Testing helper: wraps an arbitrary value so it can be used where a type
/// implementing `traits::Extrinsic` is required.
#[derive(PartialEq, Eq, Clone, Serialize, Deserialize, Debug, Encode, Decode)]
pub struct ExtrinsicWrapper<Xt>(Xt);

impl<Xt> traits::Extrinsic for ExtrinsicWrapper<Xt> {
	/// Signedness cannot be determined for an opaque wrapped value.
	fn is_signed(&self) -> Option<bool> {
		None
	}
}

impl<Xt> From<Xt> for ExtrinsicWrapper<Xt> {
	fn from(xt: Xt) -> Self {
		ExtrinsicWrapper(xt)
	}
}

impl<Xt> Deref for ExtrinsicWrapper<Xt> {
	type Target = Xt;

	// Transparent access to the wrapped value.
	fn deref(&self) -> &Self::Target {
		&self.0
	}
}
#[derive(PartialEq, Eq, Clone, Serialize, Deserialize, Debug, Encode, Decode)]
pub struct Block<Xt> {
pub header: Header,
pub extrinsics: Vec<Xt>,
}
impl<Xt: 'static + Codec + Sized + Send + Sync + Serialize + DeserializeOwned + Clone + Eq + Debug> traits::Block for Block<Xt> {
impl<Xt: 'static + Codec + Sized + Send + Sync + Serialize + DeserializeOwned + Clone + Eq + Debug + traits::Extrinsic> traits::Block for Block<Xt> {
type Extrinsic = Xt;
type Header = Header;
type Hash = <Header as traits::Header>::Hash;
@@ -125,6 +148,11 @@ impl<Call: Codec + Sync + Send + Serialize, Context> Checkable<Context> for Test
type Checked = Self;
fn check(self, _: &Context) -> Result<Self::Checked, &'static str> { Ok(self) }
}
impl<Call: Codec + Sync + Send + Serialize> traits::Extrinsic for TestXt<Call> {
	/// Test extrinsics carry no signedness information.
	fn is_signed(&self) -> Option<bool> {
		None
	}
}
impl<Call> Applyable for TestXt<Call> where
Call: 'static + Sized + Send + Sync + Clone + Eq + Codec + Debug + Serialize + DeserializeOwned,
{
+43 -7
View File
@@ -383,6 +383,17 @@ pub trait MaybeDisplay {}
#[cfg(not(feature = "std"))]
impl<T> MaybeDisplay for T {}
/// A type that implements `Decode` when compiled with `std`; under `no_std`
/// the bound is dropped and the trait is satisfied by every type.
#[cfg(feature = "std")]
pub trait MaybeDecode: ::codec::Decode {}
#[cfg(feature = "std")]
impl<T: ::codec::Decode> MaybeDecode for T {}
#[cfg(not(feature = "std"))]
pub trait MaybeDecode {}
#[cfg(not(feature = "std"))]
impl<T> MaybeDecode for T {}

/// A type that can be used in runtime structures: blanket-implemented for
/// every `Send + Sync + Sized + Eq + Clone + 'static` (serializable/debuggable) type.
pub trait Member: Send + Sync + Sized + MaybeSerializeDebug + Eq + PartialEq + Clone + 'static {}
impl<T: Send + Sync + Sized + MaybeSerializeDebug + Eq + PartialEq + Clone + 'static> Member for T {}
@@ -430,7 +441,7 @@ pub trait Header: Clone + Send + Sync + Codec + Eq + MaybeSerializeDebug + 'stat
///
/// You can get an iterator over each of the `extrinsics` and retrieve the `header`.
pub trait Block: Clone + Send + Sync + Codec + Eq + MaybeSerializeDebug + 'static {
type Extrinsic: Member + Codec;
type Extrinsic: Member + Codec + Extrinsic;
type Header: Header<Hash=Self::Hash>;
type Hash: Member + ::rstd::hash::Hash + Copy + MaybeDisplay + Default + SimpleBitOps + Codec + AsRef<[u8]> + AsMut<[u8]>;
@@ -527,12 +538,37 @@ pub trait DigestItem: Codec + Member {
type AuthorityId: Member;
/// Returns Some if the entry is the `AuthoritiesChange` entry.
fn as_authorities_change(&self) -> Option<&[Self::AuthorityId]> {
None
}
fn as_authorities_change(&self) -> Option<&[Self::AuthorityId]>;
/// Returns Some if the entry is the `ChangesTrieRoot` entry.
fn as_changes_trie_root(&self) -> Option<&Self::Hash> {
None
}
fn as_changes_trie_root(&self) -> Option<&Self::Hash>;
}
/// Something that provides an inherent for a runtime.
pub trait ProvideInherent {
	/// The inherent that is provided.
	type Inherent: Encode + MaybeDecode;
	/// The error used by this trait.
	type Error: Encode + MaybeDecode;
	/// The call for setting the inherent.
	type Call: Encode + MaybeDecode;

	/// Create the inherent extrinsics.
	///
	/// # Return
	///
	/// Returns a vector with tuples containing the index for the extrinsic and the extrinsic itself.
	fn create_inherent_extrinsics(data: Self::Inherent) -> Vec<(u32, Self::Call)>;

	/// Check that the given inherent is valid.
	///
	/// `extract_function` maps a block extrinsic to this module's `Call`,
	/// returning `None` for extrinsics that do not belong to this module.
	fn check_inherent<Block: self::Block, F: Fn(&Block::Extrinsic) -> Option<&Self::Call>>(
		block: &Block, data: Self::Inherent, extract_function: &F
	) -> Result<(), Self::Error>;
}
/// Something that acts like an `Extrinsic`.
pub trait Extrinsic {
	/// Is this `Extrinsic` signed?
	/// If no information is available about signed/unsigned, `None` should be returned.
	// Default: signedness unknown.
	fn is_signed(&self) -> Option<bool> { None }
}
@@ -33,11 +33,11 @@ pub type TransactionTag = Vec<u8>;
#[cfg_attr(feature = "std", derive(Debug))]
pub enum TransactionValidity {
Invalid,
Valid(
/* priority: */TransactionPriority,
/* requires: */Vec<TransactionTag>,
/* provides: */Vec<TransactionTag>,
/* longevity: */TransactionLongevity
),
Valid {
priority: TransactionPriority,
requires: Vec<TransactionTag>,
provides: Vec<TransactionTag>,
longevity: TransactionLongevity
},
Unknown,
}
+2 -1
View File
@@ -15,7 +15,8 @@
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
#[cfg(feature = "nightly")]
extern crate alloc;
#[doc(hidden)]
pub extern crate alloc;
extern "C" {
fn ext_malloc(size: usize) -> *mut u8;
+2
View File
@@ -9,6 +9,7 @@ serde_derive = { version = "1.0", optional = true }
parity-codec = { version = "2.1", default-features = false }
parity-codec-derive = { version = "2.1", default-features = false }
sr-std = { path = "../sr-std", default-features = false }
sr-primitives = { path = "../sr-primitives", default-features = false }
[features]
default = ["std"]
@@ -17,4 +18,5 @@ std = [
"serde_derive",
"parity-codec/std",
"sr-std/std",
"sr-primitives/std",
]
+5 -6
View File
@@ -29,15 +29,14 @@ extern crate sr_std as rstd;
#[macro_use]
extern crate parity_codec_derive;
extern crate sr_primitives as runtime_primitives;
#[cfg(feature = "std")]
use std::fmt;
#[cfg(feature = "std")]
use std::collections::HashSet;
#[cfg(feature = "std")]
pub type VersionString = ::std::borrow::Cow<'static, str>;
#[cfg(not(feature = "std"))]
pub type VersionString = &'static str;
use runtime_primitives::RuntimeString;
/// The identity of a particular API interface that the runtime might provide.
pub type ApiId = [u8; 8];
@@ -80,14 +79,14 @@ pub struct RuntimeVersion {
/// Identifies the different Substrate runtimes. There'll be at least polkadot and node.
/// A different on-chain spec_name to that of the native runtime would normally result
/// in node not attempting to sync or author blocks.
pub spec_name: VersionString,
pub spec_name: RuntimeString,
/// Name of the implementation of the spec. This is of little consequence for the node
/// and serves only to differentiate code of different implementation teams. For this
/// codebase, it will be parity-polkadot. If there were a non-Rust implementation of the
/// Polkadot runtime (e.g. C++), then it would identify itself with an accordingly different
/// `impl_name`.
pub impl_name: VersionString,
pub impl_name: RuntimeString,
/// `authoring_version` is the version of the authorship interface. An authoring node
/// will not attempt to author blocks unless this is equal to its native runtime.
+113 -16
View File
@@ -23,7 +23,7 @@ use std::marker::PhantomData;
use hash_db::Hasher;
use trie_backend::TrieBackend;
use trie_backend_essence::TrieBackendStorage;
use substrate_trie::{TrieDBMut, TrieMut, MemoryDB, trie_root};
use substrate_trie::{TrieDBMut, TrieMut, MemoryDB, trie_root, child_trie_root};
use heapsize::HeapSizeOf;
/// A state backend is used to read state data and can have changes committed
@@ -35,7 +35,7 @@ pub trait Backend<H: Hasher> {
type Error: super::Error;
/// Storage changes to be applied if committing
type Transaction;
type Transaction: Consolidate + Default;
/// Type of trie backend storage.
type TrieBackendStorage: TrieBackendStorage<H>;
@@ -43,11 +43,22 @@ pub trait Backend<H: Hasher> {
/// Get keyed storage associated with specific address, or None if there is nothing associated.
fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error>;
/// Get keyed child storage associated with specific address, or None if there is nothing associated.
fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error>;
/// true if a key exists in storage.
fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
Ok(self.storage(key)?.is_some())
}
/// true if a key exists in child storage.
fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<bool, Self::Error> {
Ok(self.child_storage(storage_key, key)?.is_some())
}
/// Retrieve all entries keys of child storage and call `f` for each of those keys.
fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], f: F);
/// Retrieve all entries keys of which start with the given prefix and
/// call `f` for each of those keys.
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F);
@@ -59,6 +70,13 @@ pub trait Backend<H: Hasher> {
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord;
/// Calculate the child storage root, with given delta over what is already stored in
/// the backend, and produce a "transaction" that can be used to commit.
fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord;
/// Get all key/value pairs into a Vec.
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)>;
@@ -66,6 +84,30 @@ pub trait Backend<H: Hasher> {
fn try_into_trie_backend(self) -> Option<TrieBackend<Self::TrieBackendStorage, H>>;
}
/// Trait that allows consolidate two transactions together.
pub trait Consolidate {
	/// Consolidate two transactions into one.
	fn consolidate(&mut self, other: Self);
}

impl Consolidate for () {
	// Unit transaction: nothing to merge.
	fn consolidate(&mut self, _: Self) {
		()
	}
}

impl Consolidate for Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)> {
	// List-style transaction: concatenation preserves operation order.
	fn consolidate(&mut self, mut other: Self) {
		self.append(&mut other);
	}
}

impl<H: Hasher> Consolidate for MemoryDB<H> {
	// Delegate to `MemoryDB`'s own consolidation.
	fn consolidate(&mut self, other: Self) {
		MemoryDB::consolidate(self, other)
	}
}
/// Error impossible.
// TODO: use `!` type when stabilized.
#[derive(Debug)]
@@ -85,7 +127,7 @@ impl error::Error for Void {
/// tests.
#[derive(Eq)]
pub struct InMemory<H> {
inner: HashMap<Vec<u8>, Vec<u8>>,
inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>,
_hasher: PhantomData<H>,
}
@@ -117,10 +159,10 @@ impl<H: Hasher> InMemory<H> where H::Out: HeapSizeOf {
/// Copy the state, with applied updates
pub fn update(&self, changes: <Self as Backend<H>>::Transaction) -> Self {
let mut inner: HashMap<_, _> = self.inner.clone();
for (key, val) in changes {
for (storage_key, key, val) in changes {
match val {
Some(v) => { inner.insert(key, v); },
None => { inner.remove(&key); },
Some(v) => { inner.entry(storage_key).or_default().insert(key, v); },
None => { inner.entry(storage_key).or_default().remove(&key); },
}
}
@@ -128,8 +170,8 @@ impl<H: Hasher> InMemory<H> where H::Out: HeapSizeOf {
}
}
impl<H> From<HashMap<Vec<u8>, Vec<u8>>> for InMemory<H> {
fn from(inner: HashMap<Vec<u8>, Vec<u8>>) -> Self {
impl<H> From<HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>> for InMemory<H> {
fn from(inner: HashMap<Option<Vec<u8>>, HashMap<Vec<u8>, Vec<u8>>>) -> Self {
InMemory {
inner: inner,
_hasher: PhantomData,
@@ -137,23 +179,42 @@ impl<H> From<HashMap<Vec<u8>, Vec<u8>>> for InMemory<H> {
}
}
impl<H> From<HashMap<Vec<u8>, Vec<u8>>> for InMemory<H> {
fn from(inner: HashMap<Vec<u8>, Vec<u8>>) -> Self {
let mut expanded = HashMap::new();
expanded.insert(None, inner);
InMemory {
inner: expanded,
_hasher: PhantomData,
}
}
}
impl super::Error for Void {}
impl<H: Hasher> Backend<H> for InMemory<H> where H::Out: HeapSizeOf {
type Error = Void;
type Transaction = Vec<(Vec<u8>, Option<Vec<u8>>)>;
type Transaction = Vec<(Option<Vec<u8>>, Vec<u8>, Option<Vec<u8>>)>;
type TrieBackendStorage = MemoryDB<H>;
fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
Ok(self.inner.get(key).map(Clone::clone))
Ok(self.inner.get(&None).and_then(|map| map.get(key).map(Clone::clone)))
}
// Look up `key` in the child map keyed by `Some(storage_key)`; the top-level
// trie lives under the `None` key.
fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
	Ok(self.inner.get(&Some(storage_key.to_vec())).and_then(|map| map.get(key).map(Clone::clone)))
}
fn exists_storage(&self, key: &[u8]) -> Result<bool, Self::Error> {
Ok(self.inner.get(key).is_some())
Ok(self.inner.get(&None).map(|map| map.get(key).is_some()).unwrap_or(false))
}
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
self.inner.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f);
self.inner.get(&None).map(|map| map.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f));
}
// Invoke `f` for every key of the child trie `storage_key`; no-op if the
// child trie does not exist.
fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], mut f: F) {
	self.inner.get(&Some(storage_key.to_vec())).map(|map| map.keys().for_each(|k| f(&k)));
}
fn storage_root<I>(&self, delta: I) -> (H::Out, Self::Transaction)
@@ -161,7 +222,7 @@ impl<H: Hasher> Backend<H> for InMemory<H> where H::Out: HeapSizeOf {
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
<H as Hasher>::Out: Ord,
{
let existing_pairs = self.inner.iter().map(|(k, v)| (k.clone(), Some(v.clone())));
let existing_pairs = self.inner.get(&None).into_iter().flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone()))));
let transaction: Vec<_> = delta.into_iter().collect();
let root = trie_root::<H, _, _, _>(existing_pairs.chain(transaction.iter().cloned())
@@ -170,16 +231,52 @@ impl<H: Hasher> Backend<H> for InMemory<H> where H::Out: HeapSizeOf {
.filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val)))
);
(root, transaction)
let full_transaction = transaction.into_iter().map(|(k, v)| (None, k, v)).collect();
(root, full_transaction)
}
// Compute the child trie root for `storage_key` after applying `delta`,
// returning the root plus a transaction tagging every entry with
// `Some(storage_key)` so `update` routes it to the right child map.
fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, Self::Transaction)
where
	I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
	H::Out: Ord
{
	let storage_key = storage_key.to_vec();

	// Current child-trie contents, as (key, Some(value)) pairs.
	let existing_pairs = self.inner.get(&Some(storage_key.clone())).into_iter().flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone()))));

	let transaction: Vec<_> = delta.into_iter().collect();
	// Delta entries shadow existing ones via the intermediate HashMap;
	// `None` values drop the key from the root computation.
	let root = child_trie_root::<H, _, _, _>(
		&storage_key,
		existing_pairs.chain(transaction.iter().cloned())
			.collect::<HashMap<_, _>>()
			.into_iter()
			.filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val)))
	);

	let full_transaction = transaction.into_iter().map(|(k, v)| (Some(storage_key.clone()), k, v)).collect();

	(root, full_transaction)
}
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
self.inner.iter().map(|(k, v)| (k.clone(), v.clone())).collect()
self.inner.get(&None).into_iter().flat_map(|map| map.iter().map(|(k, v)| (k.clone(), v.clone()))).collect()
}
fn try_into_trie_backend(self) -> Option<TrieBackend<Self::TrieBackendStorage, H>> {
let mut mdb = MemoryDB::default(); // TODO: should be more correct and use ::new()
let root = insert_into_memory_db::<H, _>(&mut mdb, self.inner.into_iter())?;
let mut root = None;
for (storage_key, map) in self.inner {
if storage_key != None {
let _ = insert_into_memory_db::<H, _>(&mut mdb, map.into_iter())?;
} else {
root = Some(insert_into_memory_db::<H, _>(&mut mdb, map.into_iter())?);
}
}
let root = match root {
Some(root) => root,
None => insert_into_memory_db::<H, _>(&mut mdb, ::std::iter::empty())?,
};
Some(TrieBackend::new(mdb, root))
}
}
@@ -73,10 +73,10 @@ fn prepare_extrinsics_input<B, H>(
where
B: Backend<H>,
H: Hasher,
{
let mut extrinsic_map = BTreeMap::<Vec<u8>, BTreeSet<u32>>::new();
for (key, val) in changes.prospective.iter().chain(changes.committed.iter()) {
for (key, val) in changes.prospective.top.iter().chain(changes.committed.top.iter()) {
let extrinsics = match val.extrinsics {
Some(ref extrinsics) => extrinsics,
None => continue,
@@ -274,7 +274,7 @@ mod test {
let (backend, storage, mut changes) = prepare_for_build();
// 110: missing from backend, set to None in overlay
changes.prospective.insert(vec![110], OverlayedValue {
changes.prospective.top.insert(vec![110], OverlayedValue {
value: None,
extrinsics: Some(vec![1].into_iter().collect())
});
@@ -31,6 +31,9 @@
//! block }, containing entries for every storage key that has been changed in
//! the last N*digest_level-1 blocks (except for genesis block), mapping these keys
//! to the set of lower-level digest blocks.
//!
//! Changes trie only contains the top level storage changes. Sub-level changes
//! are propagated through its storage root on the top level storage.
mod build;
mod build_iterator;
+102 -10
View File
@@ -17,11 +17,12 @@
//! Concrete externalities implementation.
use std::{error, fmt, cmp::Ord};
use backend::Backend;
use backend::{Backend, Consolidate};
use changes_trie::{Storage as ChangesTrieStorage, compute_changes_trie_root};
use {Externalities, OverlayedChanges};
use hash_db::Hasher;
use substrate_trie::{MemoryDB, TrieDBMut, TrieMut};
use primitives::storage::well_known_keys::is_child_storage_key;
use substrate_trie::{MemoryDB, TrieDBMut, TrieMut, default_child_trie_root, is_child_trie_key_valid};
use heapsize::HeapSizeOf;
const EXT_NOT_ALLOWED_TO_FAIL: &'static str = "Externalities not allowed to fail within runtime";
@@ -122,6 +123,31 @@ where
/// Invalidate the memoized storage-root transaction; called before any write
/// so `storage_root` recomputes from the updated overlay.
fn mark_dirty(&mut self) {
self.storage_transaction = None;
}
/// Fetch child storage root together with its transaction.
///
/// Computes the child trie root from the backend state plus all overlayed
/// changes (committed chained before prospective), then mirrors the result
/// into the top-level overlay via `sync_child_storage_root`. An empty child
/// trie (default root) is synced as a deletion of the top-level entry.
fn child_storage_root_transaction(&mut self, storage_key: &[u8]) -> (Vec<u8>, B::Transaction) {
self.mark_dirty();
let (root, transaction) = {
// `map.1` is the key -> Option<value> change map for this child trie;
// `map.0` holds the extrinsic indices and is not part of the delta.
let delta = self.overlay.committed.children.get(storage_key)
.into_iter()
.flat_map(|map| map.1.iter().map(|(k, v)| (k.clone(), v.clone())))
.chain(self.overlay.prospective.children.get(storage_key)
.into_iter()
.flat_map(|map| map.1.iter().map(|(k, v)| (k.clone(), v.clone()))));
self.backend.child_storage_root(storage_key, delta)
};
// A default root means the child trie is empty: remove the top-level
// entry rather than storing the default hash.
let root_val = if root == default_child_trie_root::<H>(storage_key) {
None
} else {
Some(root.clone())
};
self.overlay.sync_child_storage_root(storage_key, root_val);
(root, transaction)
}
}
#[cfg(test)]
@@ -137,8 +163,8 @@ where
self.backend.pairs().iter()
.map(|&(ref k, ref v)| (k.to_vec(), Some(v.to_vec())))
.chain(self.overlay.committed.clone().into_iter().map(|(k, v)| (k, v.value)))
.chain(self.overlay.prospective.clone().into_iter().map(|(k, v)| (k, v.value)))
.chain(self.overlay.committed.top.clone().into_iter().map(|(k, v)| (k, v.value)))
.chain(self.overlay.prospective.top.clone().into_iter().map(|(k, v)| (k, v.value)))
.collect::<HashMap<_, _>>()
.into_iter()
.filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val)))
@@ -158,6 +184,11 @@ where
self.backend.storage(key).expect(EXT_NOT_ALLOWED_TO_FAIL))
}
fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option<Vec<u8>> {
	// An overlay entry (set or deleted) shadows the backend; only consult
	// the backend when the overlay knows nothing about this key.
	match self.overlay.child_storage(storage_key, key) {
		Some(overlaid) => overlaid.map(|v| v.to_vec()),
		None => self.backend.child_storage(storage_key, key).expect(EXT_NOT_ALLOWED_TO_FAIL),
	}
}
fn exists_storage(&self, key: &[u8]) -> bool {
match self.overlay.storage(key) {
Some(x) => x.is_some(),
@@ -165,12 +196,52 @@ where
}
}
fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> bool {
	// The overlay's verdict wins; fall back to the backend otherwise.
	self.overlay.child_storage(storage_key, key)
		.map(|entry| entry.is_some())
		.unwrap_or_else(|| self.backend.exists_child_storage(storage_key, key)
			.expect(EXT_NOT_ALLOWED_TO_FAIL))
}
fn place_storage(&mut self, key: Vec<u8>, value: Option<Vec<u8>>) {
	// Writes into the child-storage namespace must go through the child APIs.
	if is_child_storage_key(&key) {
		warn!(target: "trie", "Refuse to directly set child storage key");
	} else {
		self.mark_dirty();
		self.overlay.set_storage(key, value);
	}
}
fn place_child_storage(&mut self, storage_key: Vec<u8>, key: Vec<u8>, value: Option<Vec<u8>>) -> bool {
	// Accept only well-formed keys inside the child-storage namespace.
	let accepted = is_child_storage_key(&storage_key)
		&& is_child_trie_key_valid::<H>(&storage_key);
	if accepted {
		self.mark_dirty();
		self.overlay.set_child_storage(storage_key, key, value);
	}
	accepted
}
fn kill_child_storage(&mut self, storage_key: &[u8]) {
	// Silently ignore keys that are not valid child trie keys.
	if !(is_child_storage_key(storage_key) && is_child_trie_key_valid::<H>(storage_key)) {
		return;
	}
	self.mark_dirty();
	self.overlay.clear_child_storage(storage_key);
	// Shadow every key still present in the backend with a deletion so the
	// overlay fully covers the wiped child trie.
	self.backend.for_keys_in_child_storage(storage_key, |key| {
		self.overlay.set_child_storage(storage_key.to_vec(), key.to_vec(), None);
	});
}
fn clear_prefix(&mut self, prefix: &[u8]) {
if is_child_storage_key(prefix) {
warn!(target: "trie", "Refuse to directly clear prefix that is part of child storage key");
return;
}
self.mark_dirty();
self.overlay.clear_prefix(prefix);
self.backend.for_keys_with_prefix(prefix, |key| {
@@ -183,19 +254,40 @@ where
}
fn storage_root(&mut self) -> H::Out {
if let Some((_, ref root)) = self.storage_transaction {
if let Some((_, ref root)) = self.storage_transaction {
return root.clone();
}
// compute and memoize
let delta = self.overlay.committed.iter().map(|(k, v)| (k.clone(), v.value.clone()))
.chain(self.overlay.prospective.iter().map(|(k, v)| (k.clone(), v.value.clone())));
let mut transaction = B::Transaction::default();
let child_storage_keys: Vec<_> = self.overlay.prospective.children.keys().cloned().collect();
let (root, transaction) = self.backend.storage_root(delta);
for key in child_storage_keys {
let (_, t) = self.child_storage_root_transaction(&key);
transaction.consolidate(t);
}
// compute and memoize
let delta = self.overlay.committed.top.iter().map(|(k, v)| (k.clone(), v.value.clone()))
.chain(self.overlay.prospective.top.iter().map(|(k, v)| (k.clone(), v.value.clone())));
let (root, t) = self.backend.storage_root(delta);
transaction.consolidate(t);
self.storage_transaction = Some((transaction, root));
root
}
fn child_storage_root(&mut self, storage_key: &[u8]) -> Option<Vec<u8>> {
	// Not a (valid) child trie key: there is no root to report.
	if !is_child_storage_key(storage_key) || !is_child_trie_key_valid::<H>(storage_key) {
		return None;
	}
	let root = if self.storage_transaction.is_some() {
		// `storage_root` already ran, so the child root has been synced into
		// top-level storage; absence there means the default (empty) root.
		self.storage(storage_key).unwrap_or(default_child_trie_root::<H>(storage_key))
	} else {
		self.child_storage_root_transaction(storage_key).0
	};
	Some(root)
}
fn storage_changes_root(&mut self, block: u64) -> Option<H::Out> {
let root_and_tx = compute_changes_trie_root::<_, T, H>(
self.backend,
@@ -287,7 +379,7 @@ mod tests {
#[test]
fn storage_changes_root_is_some_when_extrinsic_changes_are_empty() {
let mut overlay = prepare_overlay_with_changes();
overlay.prospective.get_mut(&vec![1]).unwrap().value = None;
overlay.prospective.top.get_mut(&vec![1]).unwrap().value = None;
let storage = TestChangesTrieStorage::new();
let backend = TestBackend::default();
let mut ext = TestExt::new(&mut overlay, &backend, Some(&storage));
+49 -6
View File
@@ -97,33 +97,62 @@ pub trait Externalities<H: Hasher> {
/// Read storage of current contract being called.
fn storage(&self, key: &[u8]) -> Option<Vec<u8>>;
/// Read child storage of current contract being called.
fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option<Vec<u8>>;
/// Set storage entry `key` of current contract being called (effective immediately).
fn set_storage(&mut self, key: Vec<u8>, value: Vec<u8>) {
	// Setting is placing a `Some` value.
	let value = Some(value);
	self.place_storage(key, value);
}
/// Set child storage entry `key` of current contract being called (effective immediately).
fn set_child_storage(&mut self, storage_key: Vec<u8>, key: Vec<u8>, value: Vec<u8>) -> bool {
	// Setting is placing a `Some` value; success is whatever the placement reports.
	let value = Some(value);
	self.place_child_storage(storage_key, key, value)
}
/// Clear a storage entry (`key`) of current contract being called (effective immediately).
fn clear_storage(&mut self, key: &[u8]) {
	// Clearing is placing `None`.
	let key = key.to_vec();
	self.place_storage(key, None);
}
/// Clear a child storage entry (`key`) of current contract being called (effective immediately).
fn clear_child_storage(&mut self, storage_key: &[u8], key: &[u8]) -> bool {
// Clearing is placing `None`; key-validity checks happen in `place_child_storage`.
self.place_child_storage(storage_key.to_vec(), key.to_vec(), None)
}
/// Whether a storage entry exists.
fn exists_storage(&self, key: &[u8]) -> bool {
// Default: derive existence from a full read; implementors may override
// with a cheaper check (see the `Ext` implementation).
self.storage(key).is_some()
}
/// Whether a child storage entry exists.
fn exists_child_storage(&self, storage_key: &[u8], key: &[u8]) -> bool {
// Default: derive existence from a full read; implementors may override
// with a cheaper check (see the `Ext` implementation).
self.child_storage(storage_key, key).is_some()
}
/// Clear an entire child storage.
fn kill_child_storage(&mut self, storage_key: &[u8]);
/// Clear storage entries which keys are start with the given prefix.
fn clear_prefix(&mut self, prefix: &[u8]);
/// Set or clear a storage entry (`key`) of current contract being called (effective immediately).
fn place_storage(&mut self, key: Vec<u8>, value: Option<Vec<u8>>);
/// Set or clear a child storage entry. Return whether the operation succeeds.
fn place_child_storage(&mut self, storage_key: Vec<u8>, key: Vec<u8>, value: Option<Vec<u8>>) -> bool;
/// Get the identity of the chain.
fn chain_id(&self) -> u64;
/// Get the trie root of the current storage map.
/// Get the trie root of the current storage map. This will also update all child storage keys in the top-level storage map.
fn storage_root(&mut self) -> H::Out where H::Out: Ord;
/// Get the trie root of a child storage map. This will also update the value of the child storage keys in the top-level storage map. If the storage root equals default hash as defined by trie, the key in top-level storage map will be removed.
///
/// Returns None if key provided is not a storage key. This can be due to not being started with CHILD_STORAGE_KEY_PREFIX, or the trie implementation regards the key as invalid.
fn child_storage_root(&mut self, storage_key: &[u8]) -> Option<Vec<u8>>;
/// Get the change trie root of the current storage overlay at given block.
fn storage_changes_root(&mut self, block: u64) -> Option<H::Out> where H::Out: Ord;
}
@@ -477,6 +506,7 @@ where
mod tests {
use std::collections::HashMap;
use codec::Encode;
use overlayed_changes::OverlayedValue;
use super::*;
use super::backend::InMemory;
use super::ext::Ext;
@@ -600,12 +630,12 @@ mod tests {
let backend = InMemory::<Blake2Hasher>::from(initial).try_into_trie_backend().unwrap();
let mut overlay = OverlayedChanges {
committed: map![
b"aba".to_vec() => Some(b"1312".to_vec()).into(),
b"bab".to_vec() => Some(b"228".to_vec()).into()
b"aba".to_vec() => OverlayedValue::from(Some(b"1312".to_vec())),
b"bab".to_vec() => OverlayedValue::from(Some(b"228".to_vec()))
],
prospective: map![
b"abd".to_vec() => Some(b"69".to_vec()).into(),
b"bbd".to_vec() => Some(b"42".to_vec()).into()
b"abd".to_vec() => OverlayedValue::from(Some(b"69".to_vec())),
b"bbd".to_vec() => OverlayedValue::from(Some(b"42".to_vec()))
],
..Default::default()
};
@@ -631,6 +661,19 @@ mod tests {
);
}
#[test]
fn set_child_storage_works() {
let backend = InMemory::<Blake2Hasher>::default().try_into_trie_backend().unwrap();
let changes_trie_storage = InMemoryChangesTrieStorage::new();
let mut overlay = OverlayedChanges::default();
let mut ext = Ext::new(&mut overlay, &backend, Some(&changes_trie_storage));
assert!(ext.set_child_storage(b":child_storage:testchild".to_vec(), b"abc".to_vec(), b"def".to_vec()));
assert_eq!(ext.child_storage(b":child_storage:testchild", b"abc"), Some(b"def".to_vec()));
ext.kill_child_storage(b":child_storage:testchild");
assert_eq!(ext.child_storage(b":child_storage:testchild", b"abc"), None);
}
#[test]
fn prove_read_and_proof_check_works() {
// fetch read proof from 'remote' full node
@@ -16,6 +16,7 @@
//! The overlayed changes to state.
#[cfg(test)] use std::iter::FromIterator;
use std::collections::{HashMap, HashSet};
use codec::Decode;
use changes_trie::{NO_EXTRINSIC_INDEX, Configuration as ChangesTrieConfig};
@@ -28,9 +29,9 @@ use primitives::storage::well_known_keys::EXTRINSIC_INDEX;
#[derive(Debug, Default, Clone)]
pub struct OverlayedChanges {
/// Changes that are not yet committed.
pub(crate) prospective: HashMap<Vec<u8>, OverlayedValue>,
pub(crate) prospective: OverlayedChangeSet,
/// Committed changes.
pub(crate) committed: HashMap<Vec<u8>, OverlayedValue>,
pub(crate) committed: OverlayedChangeSet,
/// Changes trie configuration. None by default, but could be installed by the
/// runtime if it supports change tries.
pub(crate) changes_trie_config: Option<ChangesTrieConfig>,
@@ -47,6 +48,39 @@ pub struct OverlayedValue {
pub extrinsics: Option<HashSet<u32>>,
}
/// Prospective or committed overlayed change set.
#[derive(Debug, Default, Clone)]
#[cfg_attr(test, derive(PartialEq))]
pub struct OverlayedChangeSet {
/// Top level storage changes.
pub top: HashMap<Vec<u8>, OverlayedValue>,
/// Child storage changes.
// Keyed by child storage key. The tuple holds (a) the set of extrinsic
// indices that touched this child trie and (b) the per-key value changes,
// where `None` marks a deletion.
pub children: HashMap<Vec<u8>, (Option<HashSet<u32>>, HashMap<Vec<u8>, Option<Vec<u8>>>)>,
}
#[cfg(test)]
impl FromIterator<(Vec<u8>, OverlayedValue)> for OverlayedChangeSet {
	/// Test helper: build a change set containing only top-level entries.
	fn from_iter<T: IntoIterator<Item = (Vec<u8>, OverlayedValue)>>(iter: T) -> Self {
		let top = iter.into_iter().collect();
		OverlayedChangeSet { top, children: Default::default() }
	}
}
impl OverlayedChangeSet {
	/// Whether the change set is empty.
	pub fn is_empty(&self) -> bool {
		self.children.is_empty() && self.top.is_empty()
	}

	/// Clear the change set.
	pub fn clear(&mut self) {
		self.children.clear();
		self.top.clear();
	}
}
impl OverlayedChanges {
/// Sets the changes trie configuration.
///
@@ -68,17 +102,36 @@ impl OverlayedChanges {
/// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose
/// value has been set.
pub fn storage(&self, key: &[u8]) -> Option<Option<&[u8]>> {
self.prospective.get(key)
.or_else(|| self.committed.get(key))
self.prospective.top.get(key)
.or_else(|| self.committed.top.get(key))
.map(|x| x.value.as_ref().map(AsRef::as_ref))
}
/// Returns a double-Option: `None` if the key is unknown (i.e. the query should be referred
/// to the backend); `Some(None)` if the key has been deleted. `Some(Some(...))` for a key
/// whose value has been set.
pub fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option<Option<&[u8]>> {
	// Prospective changes shadow committed ones.
	for change_set in &[&self.prospective, &self.committed] {
		if let Some(child) = change_set.children.get(storage_key) {
			if let Some(val) = child.1.get(key) {
				return Some(val.as_ref().map(AsRef::as_ref));
			}
		}
	}
	None
}
/// Inserts the given key-value pair into the prospective change set.
///
/// `None` can be used to delete a value specified by the given key.
pub(crate) fn set_storage(&mut self, key: Vec<u8>, val: Option<Vec<u8>>) {
let extrinsic_index = self.extrinsic_index();
let entry = self.prospective.entry(key).or_default();
let entry = self.prospective.top.entry(key).or_default();
entry.value = val;
if let Some(extrinsic) = extrinsic_index {
@@ -87,6 +140,57 @@ impl OverlayedChanges {
}
}
/// Inserts the given key-value pair into the prospective child change set.
///
/// `None` can be used to delete a value specified by the given key.
pub(crate) fn set_child_storage(&mut self, storage_key: Vec<u8>, key: Vec<u8>, val: Option<Vec<u8>>) {
	let current_extrinsic = self.extrinsic_index();
	let child = self.prospective.children.entry(storage_key).or_default();
	// Record which extrinsic touched this child trie, when tracking is active.
	if let Some(index) = current_extrinsic {
		child.0.get_or_insert_with(Default::default).insert(index);
	}
	child.1.insert(key, val);
}
/// Sync the child storage root.
///
/// Writes `root` (or a deletion, when `None`) into the prospective top-level
/// change set under the child storage key, and copies over the extrinsic
/// indices recorded against the child trie so the top-level entry carries
/// the same attribution.
pub(crate) fn sync_child_storage_root(&mut self, storage_key: &[u8], root: Option<Vec<u8>>) {
let entry = self.prospective.top.entry(storage_key.to_vec()).or_default();
entry.value = root;
// Disjoint borrows: `prospective.top` is held mutably via `entry` while
// `prospective.children` is read immutably.
if let Some((Some(extrinsics), _)) = self.prospective.children.get(storage_key) {
for extrinsic in extrinsics {
entry.extrinsics.get_or_insert_with(Default::default)
.insert(*extrinsic);
}
}
}
/// Clear child storage of given storage key.
///
/// NOTE that this doesn't take place immediately but written into the prospective
/// change set, and still can be reverted by [`discard_prospective`].
///
/// [`discard_prospective`]: #method.discard_prospective
pub(crate) fn clear_child_storage(&mut self, storage_key: &[u8]) {
let extrinsic_index = self.extrinsic_index();
let map_entry = self.prospective.children.entry(storage_key.to_vec()).or_default();
if let Some(extrinsic) = extrinsic_index {
map_entry.0.get_or_insert_with(Default::default)
.insert(extrinsic);
}
// Turn every prospective write for this child trie into a deletion...
map_entry.1.values_mut().for_each(|e| *e = None);
// ...and shadow all committed keys with deletions too. Keys present only
// in the backend are handled by the caller (see `Ext::kill_child_storage`,
// which iterates the backend's child keys).
if let Some((_, committed_map)) = self.committed.children.get(storage_key) {
for (key, _) in committed_map.iter() {
map_entry.1.insert(key.clone(), None);
}
}
}
/// Removes all key-value pairs which keys share the given prefix.
///
/// NOTE that this doesn't take place immediately but written into the prospective
@@ -98,7 +202,7 @@ impl OverlayedChanges {
// Iterate over all prospective and mark all keys that share
// the given prefix as removed (None).
for (key, entry) in self.prospective.iter_mut() {
for (key, entry) in self.prospective.top.iter_mut() {
if key.starts_with(prefix) {
entry.value = None;
@@ -111,9 +215,9 @@ impl OverlayedChanges {
// Then do the same with keys from committed changes.
// NOTE that we are making changes in the prospective change set.
for key in self.committed.keys() {
for key in self.committed.top.keys() {
if key.starts_with(prefix) {
let entry = self.prospective.entry(key.clone()).or_default();
let entry = self.prospective.top.entry(key.clone()).or_default();
entry.value = None;
if let Some(extrinsic) = extrinsic_index {
@@ -134,8 +238,8 @@ impl OverlayedChanges {
if self.committed.is_empty() {
::std::mem::swap(&mut self.prospective, &mut self.committed);
} else {
for (key, val) in self.prospective.drain() {
let entry = self.committed.entry(key).or_default();
for (key, val) in self.prospective.top.drain() {
let entry = self.committed.top.entry(key).or_default();
entry.value = val.value;
if let Some(prospective_extrinsics) = val.extrinsics {
@@ -143,16 +247,16 @@ impl OverlayedChanges {
.extend(prospective_extrinsics);
}
}
}
}
for (storage_key, map) in self.prospective.children.drain() {
let entry = self.committed.children.entry(storage_key).or_default();
entry.1.extend(map.1.iter().map(|(k, v)| (k.clone(), v.clone())));
/// Drain committed changes to an iterator.
///
/// Panics:
/// Will panic if there are any uncommitted prospective changes.
pub fn drain<'a>(&'a mut self) -> impl Iterator<Item=(Vec<u8>, OverlayedValue)> + 'a {
assert!(self.prospective.is_empty());
self.committed.drain()
if let Some(prospective_extrinsics) = map.0 {
entry.0.get_or_insert_with(Default::default)
.extend(prospective_extrinsics);
}
}
}
}
/// Consume `OverlayedChanges` and take committed set.
@@ -161,14 +265,14 @@ impl OverlayedChanges {
/// Will panic if there are any uncommitted prospective changes.
pub fn into_committed(self) -> impl Iterator<Item=(Vec<u8>, Option<Vec<u8>>)> {
assert!(self.prospective.is_empty());
self.committed.into_iter().map(|(k, v)| (k, v.value))
self.committed.top.into_iter().map(|(k, v)| (k, v.value))
}
/// Inserts storage entry responsible for current extrinsic index.
#[cfg(test)]
pub(crate) fn set_extrinsic_index(&mut self, extrinsic_index: u32) {
use codec::Encode;
self.prospective.insert(EXTRINSIC_INDEX.to_vec(), OverlayedValue {
self.prospective.top.insert(EXTRINSIC_INDEX.to_vec(), OverlayedValue {
value: Some(extrinsic_index.encode()),
extrinsics: None,
});
@@ -293,7 +397,7 @@ mod tests {
digest_interval: 4, digest_levels: 1,
}), true);
assert_eq!(
strip_extrinsic_index(&overlay.prospective),
strip_extrinsic_index(&overlay.prospective.top),
vec![
(vec![1], OverlayedValue { value: Some(vec![2]), extrinsics: Some(vec![0].into_iter().collect()) }),
].into_iter().collect(),
@@ -329,7 +433,7 @@ mod tests {
overlay.set_extrinsic_index(2);
overlay.set_storage(vec![1], Some(vec![6]));
assert_eq!(strip_extrinsic_index(&overlay.prospective),
assert_eq!(strip_extrinsic_index(&overlay.prospective.top),
vec![
(vec![1], OverlayedValue { value: Some(vec![6]), extrinsics: Some(vec![0, 2].into_iter().collect()) }),
(vec![3], OverlayedValue { value: Some(vec![4]), extrinsics: Some(vec![1].into_iter().collect()) }),
@@ -344,14 +448,14 @@ mod tests {
overlay.set_extrinsic_index(4);
overlay.set_storage(vec![1], Some(vec![8]));
assert_eq!(strip_extrinsic_index(&overlay.committed),
assert_eq!(strip_extrinsic_index(&overlay.committed.top),
vec![
(vec![1], OverlayedValue { value: Some(vec![6]), extrinsics: Some(vec![0, 2].into_iter().collect()) }),
(vec![3], OverlayedValue { value: Some(vec![4]), extrinsics: Some(vec![1].into_iter().collect()) }),
(vec![100], OverlayedValue { value: Some(vec![101]), extrinsics: Some(vec![NO_EXTRINSIC_INDEX].into_iter().collect()) }),
].into_iter().collect());
assert_eq!(strip_extrinsic_index(&overlay.prospective),
assert_eq!(strip_extrinsic_index(&overlay.prospective.top),
vec![
(vec![1], OverlayedValue { value: Some(vec![8]), extrinsics: Some(vec![4].into_iter().collect()) }),
(vec![3], OverlayedValue { value: Some(vec![7]), extrinsics: Some(vec![3].into_iter().collect()) }),
@@ -359,7 +463,7 @@ mod tests {
overlay.commit_prospective();
assert_eq!(strip_extrinsic_index(&overlay.committed),
assert_eq!(strip_extrinsic_index(&overlay.committed.top),
vec![
(vec![1], OverlayedValue { value: Some(vec![8]), extrinsics: Some(vec![0, 2, 4].into_iter().collect()) }),
(vec![3], OverlayedValue { value: Some(vec![7]), extrinsics: Some(vec![1, 3].into_iter().collect()) }),
@@ -20,7 +20,7 @@ use std::cell::RefCell;
use hash_db::Hasher;
use heapsize::HeapSizeOf;
use hash_db::HashDB;
use trie::{TrieDB, Trie, Recorder, MemoryDB, TrieError};
use trie::{Recorder, MemoryDB, TrieError, default_child_trie_root, read_trie_value_with, read_child_trie_value_with, record_all_keys};
use trie_backend::TrieBackend;
use trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage};
use {Error, ExecutionError, Backend};
@@ -47,10 +47,21 @@ impl<'a, S, H> ProvingBackendEssence<'a, S, H>
let map_e = |e| format!("Trie lookup error: {}", e);
TrieDB::<H>::new(&eph, self.backend.root()).map_err(map_e)?
.get_with(key, &mut *self.proof_recorder)
.map(|x| x.map(|val| val.to_vec()))
.map_err(map_e)
read_trie_value_with(&eph, self.backend.root(), key, &mut *self.proof_recorder).map_err(map_e)
}
pub fn child_storage(&mut self, storage_key: &[u8], key: &[u8]) -> Result<Option<Vec<u8>>, String> {
	// The child root is stored in the main trie under `storage_key`; an
	// absent entry means the child trie is empty (default root).
	let child_root = self.storage(storage_key)?
		.unwrap_or_else(|| default_child_trie_root::<H>(storage_key));
	let mut read_overlay = MemoryDB::default();
	let eph = Ephemeral::new(
		self.backend.backend_storage(),
		&mut read_overlay,
	);
	// The `_with` variant records the lookup into the proof recorder.
	read_child_trie_value_with(storage_key, &eph, &child_root, key, &mut *self.proof_recorder)
		.map_err(|e| format!("Trie lookup error: {}", e))
}
pub fn record_all_keys(&mut self) {
@@ -62,20 +73,7 @@ impl<'a, S, H> ProvingBackendEssence<'a, S, H>
let mut iter = move || -> Result<(), Box<TrieError<H::Out>>> {
let root = self.backend.root();
let trie = TrieDB::<H>::new(&eph, root)?;
let iter = trie.iter()?;
for x in iter {
let (key, _) = x?;
// there's currently no API like iter_with()
// => use iter to enumerate all keys AND lookup each
// key using get_with
trie.get_with(&key, &mut *self.proof_recorder)
.map(|x| x.map(|val| val.to_vec()))?;
}
Ok(())
record_all_keys::<H>(&eph, root, &mut *self.proof_recorder)
};
if let Err(e) = iter() {
@@ -128,6 +126,18 @@ impl<S, H> Backend<H> for ProvingBackend<S, H>
}.storage(key)
}
fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
	// Route the read through the proving essence so it is recorded in the proof.
	let mut recorder = self.proof_recorder.try_borrow_mut()
		.expect("only fails when already borrowed; child_storage() is non-reentrant; qed");
	ProvingBackendEssence {
		backend: self.backend.essence(),
		proof_recorder: &mut *recorder,
	}.child_storage(storage_key, key)
}
fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], f: F) {
// Plain delegation: nothing is added to the proof recorder here.
self.backend.for_keys_in_child_storage(storage_key, f)
}
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
// Plain delegation: nothing is added to the proof recorder here.
self.backend.for_keys_with_prefix(prefix, f)
}
@@ -142,6 +152,14 @@ impl<S, H> Backend<H> for ProvingBackend<S, H>
self.backend.storage_root(delta)
}
fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord
{
// Root computation is done by the wrapped backend; the proving wrapper
// does not record anything for root calculations.
self.backend.child_storage_root(storage_key, delta)
}
fn try_into_trie_backend(self) -> Option<TrieBackend<Self::TrieBackendStorage, H>> {
None
}
@@ -211,7 +229,7 @@ mod tests {
#[test]
fn proof_recorded_and_checked() {
let contents = (0..64).map(|i| (vec![i], Some(vec![i]))).collect::<Vec<_>>();
let contents = (0..64).map(|i| (None, vec![i], Some(vec![i]))).collect::<Vec<_>>();
let in_memory = InMemory::<Blake2Hasher>::default();
let in_memory = in_memory.update(contents);
let in_memory_root = in_memory.storage_root(::std::iter::empty()).0;
@@ -103,6 +103,10 @@ impl<H: Hasher> Externalities<H> for TestExternalities<H> where H::Out: Ord + He
self.inner.get(key).map(|x| x.to_vec())
}
fn child_storage(&self, _storage_key: &[u8], _key: &[u8]) -> Option<Vec<u8>> {
// TestExternalities does not model child storage; reads always miss.
None
}
fn place_storage(&mut self, key: Vec<u8>, maybe_value: Option<Vec<u8>>) {
self.changes.set_storage(key.clone(), maybe_value.clone());
match maybe_value {
@@ -111,6 +115,12 @@ impl<H: Hasher> Externalities<H> for TestExternalities<H> where H::Out: Ord + He
}
}
fn place_child_storage(&mut self, _storage_key: Vec<u8>, _key: Vec<u8>, _value: Option<Vec<u8>>) -> bool {
// Child storage writes are unsupported here; report failure to the caller.
false
}
// Child storage is not modelled by TestExternalities, so there is nothing to clear.
fn kill_child_storage(&mut self, _storage_key: &[u8]) { }
fn clear_prefix(&mut self, prefix: &[u8]) {
self.changes.clear_prefix(prefix);
self.inner.retain(|key, _| !key.starts_with(prefix));
@@ -122,6 +132,10 @@ impl<H: Hasher> Externalities<H> for TestExternalities<H> where H::Out: Ord + He
trie_root::<H, _, _, _>(self.inner.clone())
}
fn child_storage_root(&mut self, _storage_key: &[u8]) -> Option<Vec<u8>> {
// No child tries exist in TestExternalities, hence no root.
None
}
fn storage_changes_root(&mut self, block: u64) -> Option<H::Out> {
compute_changes_trie_root::<_, _, H>(
&InMemory::default(),
@@ -18,7 +18,7 @@
use hash_db::Hasher;
use heapsize::HeapSizeOf;
use trie::{TrieDB, TrieDBMut, TrieError, Trie, TrieMut, MemoryDB};
use trie::{TrieDB, TrieError, Trie, MemoryDB, delta_trie_root, default_child_trie_root, child_delta_trie_root};
use trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral};
use {Backend};
@@ -64,10 +64,18 @@ impl<S: TrieBackendStorage<H>, H: Hasher> Backend<H> for TrieBackend<S, H> where
self.essence.storage(key)
}
fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<Vec<u8>>, Self::Error> {
// Reads go through the essence, which resolves the child trie root first.
self.essence.child_storage(storage_key, key)
}
fn for_keys_with_prefix<F: FnMut(&[u8])>(&self, prefix: &[u8], f: F) {
// Straight delegation to the trie essence.
self.essence.for_keys_with_prefix(prefix, f)
}
fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], f: F) {
// Straight delegation to the trie essence.
self.essence.for_keys_in_child_storage(storage_key, f)
}
fn pairs(&self) -> Vec<(Vec<u8>, Vec<u8>)> {
let mut read_overlay = MemoryDB::default(); // TODO: use new for correctness
let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay);
@@ -97,22 +105,45 @@ impl<S: TrieBackendStorage<H>, H: Hasher> Backend<H> for TrieBackend<S, H> where
{
let mut write_overlay = MemoryDB::default();
let mut root = *self.essence.root();
{
let mut eph = Ephemeral::new(
self.essence.backend_storage(),
&mut write_overlay,
);
let mut trie = TrieDBMut::<H>::from_existing(&mut eph, &mut root).expect("prior state root to exist"); // TODO: handle gracefully
for (key, change) in delta {
let result = match change {
Some(val) => trie.insert(&key, &val),
None => trie.remove(&key), // TODO: archive mode
};
match delta_trie_root::<H, _, _, _>(&mut eph, root, delta) {
Ok(ret) => root = ret,
Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e),
}
}
if let Err(e) = result {
warn!(target: "trie", "Failed to write to trie: {}", e);
}
(root, write_overlay)
}
fn child_storage_root<I>(&self, storage_key: &[u8], delta: I) -> (Vec<u8>, Self::Transaction)
where
I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>,
H::Out: Ord
{
let mut write_overlay = MemoryDB::default();
let mut root = match self.storage(storage_key) {
Ok(value) => value.unwrap_or(default_child_trie_root::<H>(storage_key)),
Err(e) => {
warn!(target: "trie", "Failed to read child storage root: {}", e);
default_child_trie_root::<H>(storage_key)
},
};
{
let mut eph = Ephemeral::new(
self.essence.backend_storage(),
&mut write_overlay,
);
match child_delta_trie_root::<H, _, _, _>(storage_key, &mut eph, root.clone(), delta) {
Ok(ret) => root = ret,
Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e),
}
}
@@ -128,6 +159,7 @@ impl<S: TrieBackendStorage<H>, H: Hasher> Backend<H> for TrieBackend<S, H> where
pub mod tests {
use std::collections::HashSet;
use primitives::{Blake2Hasher, H256};
use trie::{TrieMut, TrieDBMut};
use super::*;
fn test_db() -> (MemoryDB<Blake2Hasher>, H256) {
@@ -22,7 +22,7 @@ use std::ops::Deref;
use std::sync::Arc;
use hash_db::{self, Hasher};
use heapsize::HeapSizeOf;
use trie::{TrieDB, Trie, MemoryDB, DBValue, TrieError};
use trie::{TrieDB, Trie, MemoryDB, DBValue, TrieError, default_child_trie_root, read_trie_value, read_child_trie_value, for_keys_in_child_trie};
use changes_trie::Storage as ChangesTrieStorage;
/// Patricia trie-based storage trait.
@@ -66,8 +66,43 @@ impl<S: TrieBackendStorage<H>, H: Hasher> TrieBackendEssence<S, H> where H::Out:
let map_e = |e| format!("Trie lookup error: {}", e);
TrieDB::<H>::new(&eph, &self.root).map_err(map_e)?
.get(key).map(|x| x.map(|val| val.to_vec())).map_err(map_e)
read_trie_value(&eph, &self.root, key).map_err(map_e)
}
/// Get the value of child storage at given key.
pub fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Result<Option<Vec<u8>>, String> {
	// Resolve the child trie root stored under `storage_key` in the main
	// trie; an absent entry means the child trie is empty (default root).
	let child_root = self.storage(storage_key)?
		.unwrap_or_else(|| default_child_trie_root::<H>(storage_key));
	let mut read_overlay = MemoryDB::default();
	let eph = Ephemeral {
		storage: &self.storage,
		overlay: &mut read_overlay,
	};
	read_child_trie_value(storage_key, &eph, &child_root, key)
		.map_err(|e| format!("Trie lookup error: {}", e))
}
/// Retrieve all entries keys of child storage and call `f` for each of those keys.
pub fn for_keys_in_child_storage<F: FnMut(&[u8])>(&self, storage_key: &[u8], f: F) {
	// Iteration is best-effort: errors are logged and swallowed.
	let root = match self.storage(storage_key) {
		Err(e) => {
			debug!(target: "trie", "Error while iterating child storage: {}", e);
			return;
		},
		Ok(maybe_root) => maybe_root.unwrap_or_else(|| default_child_trie_root::<H>(storage_key)),
	};
	let mut read_overlay = MemoryDB::default();
	let eph = Ephemeral {
		storage: &self.storage,
		overlay: &mut read_overlay,
	};
	if let Err(e) = for_keys_in_child_trie::<H, _>(storage_key, &eph, &root, f) {
		debug!(target: "trie", "Error while iterating child storage: {}", e);
	}
}
/// Execute given closure for all keys starting with prefix.
+9 -3
View File
@@ -54,7 +54,7 @@ use rstd::prelude::*;
use codec::{Encode, Decode};
use runtime_api::runtime::*;
use runtime_primitives::traits::{BlindCheckable, BlakeTwo256, Block as BlockT};
use runtime_primitives::traits::{BlindCheckable, BlakeTwo256, Block as BlockT, Extrinsic as ExtrinsicT};
use runtime_primitives::{ApplyResult, Ed25519Signature, transaction_validity::TransactionValidity};
use runtime_version::RuntimeVersion;
pub use primitives::hash::H256;
@@ -115,6 +115,12 @@ impl BlindCheckable for Extrinsic {
}
}
impl ExtrinsicT for Extrinsic {
fn is_signed(&self) -> Option<bool> {
// Test-runtime extrinsics are always reported as signed.
Some(true)
}
}
/// An identifier for an account on this system.
pub type AccountId = H256;
/// A simple hash type for all our hashing.
@@ -185,7 +191,7 @@ impl_apis! {
}
}
impl BlockBuilder<Block, u32, u32, u32> for Runtime {
impl BlockBuilder<Block, u32, u32, u32, u32> for Runtime {
fn initialise_block(header: <Block as BlockT>::Header) {
system::initialise_block(header)
}
@@ -202,7 +208,7 @@ impl_apis! {
unimplemented!()
}
fn check_inherents(_block: Block, _data: u32) -> Result<(), runtime_api::BlockBuilderError> {
fn check_inherents(_block: Block, _data: u32) -> Result<(), u32> {
unimplemented!()
}
+4 -4
View File
@@ -131,12 +131,12 @@ pub fn validate_transaction(utx: Extrinsic) -> TransactionValidity {
p
};
TransactionValidity::Valid(
/* priority: */tx.amount,
TransactionValidity::Valid {
priority: tx.amount,
requires,
provides,
/* longevity: */64
)
longevity: 64
}
}
+1 -2
View File
@@ -470,7 +470,6 @@ name = "sr-api"
version = "0.1.0"
dependencies = [
"parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-codec-derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"sr-primitives 0.1.0",
"sr-std 0.1.0",
"sr-version 0.1.0",
@@ -503,7 +502,6 @@ dependencies = [
"serde_derive 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)",
"sr-io 0.1.0",
"sr-std 0.1.0",
"sr-version 0.1.0",
"substrate-primitives 0.1.0",
]
@@ -522,6 +520,7 @@ dependencies = [
"parity-codec-derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)",
"sr-primitives 0.1.0",
"sr-std 0.1.0",
]
@@ -216,7 +216,7 @@ impl<Hash: hash::Hash + Member, Ex: ::std::fmt::Debug> BasePool<Hash, Ex> {
}
/// Returns an iterator over ready transactions in the pool.
pub fn ready<'a, 'b: 'a>(&'b self) -> impl Iterator<Item=Arc<Transaction<Hash, Ex>>> + 'a {
pub fn ready(&self) -> impl Iterator<Item=Arc<Transaction<Hash, Ex>>> {
self.ready.get()
}
@@ -105,7 +105,7 @@ impl<B: ChainApi> Pool<B> {
}
match self.api.validate_transaction(at, &xt)? {
TransactionValidity::Valid(priority, requires, provides, longevity) => {
TransactionValidity::Valid { priority, requires, provides, longevity } => {
Ok(base::Transaction {
data: xt,
hash,
@@ -197,11 +197,12 @@ impl<B: ChainApi> Pool<B> {
.ok_or_else(|| error::ErrorKind::Msg(format!("Invalid block id: {:?}", at)).into())?
.as_();
let now = time::Instant::now();
let to_remove = self.ready(|pending| pending
.filter(|tx| self.rotator.ban_if_stale(&now, block_number, &tx))
.map(|tx| tx.hash.clone())
.collect::<Vec<_>>()
);
let to_remove = {
self.ready()
.filter(|tx| self.rotator.ban_if_stale(&now, block_number, &tx))
.map(|tx| tx.hash.clone())
.collect::<Vec<_>>()
};
let futures_to_remove: Vec<ExHash<B>> = {
let p = self.pool.read();
let mut hashes = Vec::new();
@@ -266,20 +267,9 @@ impl<B: ChainApi> Pool<B> {
invalid
}
/// Get ready transactions ordered by priority
pub fn ready<F, X>(&self, f: F) -> X where
F: FnOnce(&mut Iterator<Item=TransactionFor<B>>) -> X,
{
let pool = self.pool.read();
let mut ready = pool.ready();
f(&mut ready)
}
/// Returns all transactions in the pool.
///
/// Be careful with large limit values, as querying the entire pool might be time consuming.
pub fn all(&self, limit: usize) -> Vec<ExtrinsicFor<B>> {
self.ready(|it| it.take(limit).map(|ex| ex.data.clone()).collect())
/// Get an iterator for ready transactions ordered by priority
pub fn ready(&self) -> impl Iterator<Item=TransactionFor<B>> {
self.pool.read().ready()
}
/// Returns pool status.
@@ -341,12 +331,12 @@ mod tests {
if nonce < block_number {
Ok(TransactionValidity::Invalid)
} else {
Ok(TransactionValidity::Valid(
4,
if nonce > block_number { vec![vec![nonce as u8 - 1]] } else { vec![] },
vec![vec![nonce as u8]],
3,
))
Ok(TransactionValidity::Valid {
priority: 4,
requires: if nonce > block_number { vec![vec![nonce as u8 - 1]] } else { vec![] },
provides: vec![vec![nonce as u8]],
longevity: 3,
})
}
}
@@ -398,7 +388,7 @@ mod tests {
})).unwrap();
// then
assert_eq!(pool.ready(|pending| pending.map(|tx| tx.hash.clone()).collect::<Vec<_>>()), vec![hash]);
assert_eq!(pool.ready().map(|v| v.hash).collect::<Vec<_>>(), vec![hash]);
}
#[test]
@@ -489,7 +479,7 @@ mod tests {
pool.clear_stale(&BlockId::Number(5)).unwrap();
// then
assert_eq!(pool.all(3).len(), 0);
assert_eq!(pool.ready().count(), 0);
assert_eq!(pool.status().future, 0);
assert_eq!(pool.status().ready, 0);
// make sure they are temporarily banned as well
@@ -21,6 +21,7 @@ use std::{
sync::Arc,
};
use parking_lot::RwLock;
use sr_primitives::traits::Member;
use sr_primitives::transaction_validity::{
TransactionTag as Tag,
@@ -79,6 +80,16 @@ struct ReadyTx<Hash, Ex> {
pub requires_offset: usize,
}
impl<Hash: Clone, Ex> Clone for ReadyTx<Hash, Ex> {
fn clone(&self) -> Self {
ReadyTx {
transaction: self.transaction.clone(),
unlocks: self.unlocks.clone(),
requires_offset: self.requires_offset,
}
}
}
const HASH_READY: &str = r#"
Every time transaction is imported its hash is placed in `ready` map and tags in `provided_tags`;
Every time transaction is removed from the queue we remove the hash from `ready` map and from `provided_tags`;
@@ -93,8 +104,7 @@ pub struct ReadyTransactions<Hash: hash::Hash + Eq, Ex> {
/// tags that are provided by Ready transactions
provided_tags: HashMap<Tag, Hash>,
/// Transactions that are ready (i.e. don't have any requirements external to the pool)
ready: HashMap<Hash, ReadyTx<Hash, Ex>>,
// ^^ TODO [ToDr] Consider wrapping this into `Arc<RwLock<>>` and allow multiple concurrent iterators
ready: Arc<RwLock<HashMap<Hash, ReadyTx<Hash, Ex>>>>,
/// Best transactions that are ready to be included to the block without any other previous transaction.
best: BTreeSet<TransactionRef<Hash, Ex>>,
}
@@ -127,9 +137,9 @@ impl<Hash: hash::Hash + Member, Ex> ReadyTransactions<Hash, Ex> {
/// - transactions that are valid for a shorter time go first
/// 4. Lastly we sort by the time in the queue
/// - transactions that are longer in the queue go first
pub fn get<'a>(&'a self) -> impl Iterator<Item=Arc<Transaction<Hash, Ex>>> + 'a {
pub fn get(&self) -> impl Iterator<Item=Arc<Transaction<Hash, Ex>>> {
BestIterator {
all: &self.ready,
all: self.ready.clone(),
best: self.best.clone(),
awaiting: Default::default(),
}
@@ -144,7 +154,7 @@ impl<Hash: hash::Hash + Member, Ex> ReadyTransactions<Hash, Ex> {
tx: WaitingTransaction<Hash, Ex>,
) -> error::Result<Vec<Arc<Transaction<Hash, Ex>>>> {
assert!(tx.is_ready(), "Only ready transactions can be imported.");
assert!(!self.ready.contains_key(&tx.transaction.hash), "Transaction is already imported.");
assert!(!self.ready.read().contains_key(&tx.transaction.hash), "Transaction is already imported.");
self.insertion_id += 1;
let insertion_id = self.insertion_id;
@@ -154,11 +164,12 @@ impl<Hash: hash::Hash + Member, Ex> ReadyTransactions<Hash, Ex> {
let replaced = self.replace_previous(&tx)?;
let mut goes_to_best = true;
let mut ready = self.ready.write();
// Add links to transactions that unlock the current one
for tag in &tx.requires {
// Check if the transaction that satisfies the tag is still in the queue.
if let Some(other) = self.provided_tags.get(tag) {
let mut tx = self.ready.get_mut(other).expect(HASH_READY);
let mut tx = ready.get_mut(other).expect(HASH_READY);
tx.unlocks.push(hash.clone());
// this transaction depends on some other, so it doesn't go to best directly.
goes_to_best = false;
@@ -181,7 +192,7 @@ impl<Hash: hash::Hash + Member, Ex> ReadyTransactions<Hash, Ex> {
}
// insert to Ready
self.ready.insert(hash, ReadyTx {
ready.insert(hash, ReadyTx {
transaction,
unlocks: vec![],
requires_offset: 0,
@@ -192,7 +203,7 @@ impl<Hash: hash::Hash + Member, Ex> ReadyTransactions<Hash, Ex> {
/// Returns true if given hash is part of the queue.
pub fn contains(&self, hash: &Hash) -> bool {
self.ready.contains_key(hash)
self.ready.read().contains_key(hash)
}
/// Removes invalid transactions from the ready pool.
@@ -204,13 +215,14 @@ impl<Hash: hash::Hash + Member, Ex> ReadyTransactions<Hash, Ex> {
let mut removed = vec![];
let mut to_remove = hashes.iter().cloned().collect::<Vec<_>>();
let mut ready = self.ready.write();
loop {
let hash = match to_remove.pop() {
Some(hash) => hash,
None => return removed,
};
if let Some(mut tx) = self.ready.remove(&hash) {
if let Some(mut tx) = ready.remove(&hash) {
// remove entries from provided_tags
for tag in &tx.transaction.transaction.provides {
self.provided_tags.remove(tag);
@@ -218,7 +230,7 @@ impl<Hash: hash::Hash + Member, Ex> ReadyTransactions<Hash, Ex> {
// remove from unlocks
for tag in &tx.transaction.transaction.requires {
if let Some(hash) = self.provided_tags.get(tag) {
if let Some(tx) = self.ready.get_mut(hash) {
if let Some(tx) = ready.get_mut(hash) {
remove_item(&mut tx.unlocks, &hash);
}
}
@@ -253,7 +265,7 @@ impl<Hash: hash::Hash + Member, Ex> ReadyTransactions<Hash, Ex> {
};
let res = self.provided_tags.remove(&tag)
.and_then(|hash| self.ready.remove(&hash));
.and_then(|hash| self.ready.write().remove(&hash));
if let Some(tx) = res {
let unlocks = tx.unlocks;
@@ -262,9 +274,10 @@ impl<Hash: hash::Hash + Member, Ex> ReadyTransactions<Hash, Ex> {
// prune previous transactions as well
{
let hash = &tx.hash;
let mut ready = self.ready.write();
let mut find_previous = |tag| -> Option<Vec<Tag>> {
let prev_hash = self.provided_tags.get(tag)?;
let tx2 = self.ready.get_mut(&prev_hash)?;
let tx2 = ready.get_mut(&prev_hash)?;
remove_item(&mut tx2.unlocks, hash);
// We eagerly prune previous transactions as well.
// But it might not always be good.
@@ -292,7 +305,7 @@ impl<Hash: hash::Hash + Member, Ex> ReadyTransactions<Hash, Ex> {
// add the transactions that just got unlocked to `best`
for hash in unlocks {
if let Some(tx) = self.ready.get_mut(&hash) {
if let Some(tx) = self.ready.write().get_mut(&hash) {
tx.requires_offset += 1;
// this transaction is ready
if tx.requires_offset == tx.transaction.transaction.requires.len() {
@@ -328,10 +341,13 @@ impl<Hash: hash::Hash + Member, Ex> ReadyTransactions<Hash, Ex> {
}
// now check if collective priority is lower than the replacement transaction.
let old_priority = replace_hashes
.iter()
.filter_map(|hash| self.ready.get(hash))
.fold(0u64, |total, tx| total.saturating_add(tx.transaction.transaction.priority));
let old_priority = {
let ready = self.ready.read();
replace_hashes
.iter()
.filter_map(|hash| ready.get(hash))
.fold(0u64, |total, tx| total.saturating_add(tx.transaction.transaction.priority))
};
// bail - the transaction has too low priority to replace the old ones
if old_priority >= tx.priority {
@@ -349,7 +365,7 @@ impl<Hash: hash::Hash + Member, Ex> ReadyTransactions<Hash, Ex> {
None => return Ok(removed),
};
let tx = self.ready.remove(&hash).expect(HASH_READY);
let tx = self.ready.write().remove(&hash).expect(HASH_READY);
// check if this transaction provides stuff that is not provided by the new one.
let (mut unlocks, tx) = (tx.unlocks, tx.transaction.transaction);
{
@@ -371,18 +387,18 @@ impl<Hash: hash::Hash + Member, Ex> ReadyTransactions<Hash, Ex> {
/// Returns number of transactions in this queue.
pub fn len(&self) -> usize {
self.ready.len()
self.ready.read().len()
}
}
pub struct BestIterator<'a, Hash: 'a, Ex: 'a> {
all: &'a HashMap<Hash, ReadyTx<Hash, Ex>>,
pub struct BestIterator<Hash, Ex> {
all: Arc<RwLock<HashMap<Hash, ReadyTx<Hash, Ex>>>>,
awaiting: HashMap<Hash, (usize, TransactionRef<Hash, Ex>)>,
best: BTreeSet<TransactionRef<Hash, Ex>>,
}
impl<'a, Hash: 'a + hash::Hash + Member, Ex: 'a> BestIterator<'a, Hash, Ex> {
impl<Hash: hash::Hash + Member, Ex> BestIterator<Hash, Ex> {
/// Depending on number of satisfied requirements insert given ref
/// either to awaiting set or to best set.
fn best_or_awaiting(&mut self, satisfied: usize, tx_ref: TransactionRef<Hash, Ex>) {
@@ -397,32 +413,41 @@ impl<'a, Hash: 'a + hash::Hash + Member, Ex: 'a> BestIterator<'a, Hash, Ex> {
}
}
impl<'a, Hash: 'a + hash::Hash + Member, Ex: 'a> Iterator for BestIterator<'a, Hash, Ex> {
impl<Hash: hash::Hash + Member, Ex> Iterator for BestIterator<Hash, Ex> {
type Item = Arc<Transaction<Hash, Ex>>;
fn next(&mut self) -> Option<Self::Item> {
let best = self.best.iter().next_back()?.clone();
let best = self.best.take(&best)?;
loop {
let best = self.best.iter().next_back()?.clone();
let best = self.best.take(&best)?;
let ready = match self.all.get(&best.transaction.hash) {
Some(ready) => ready,
// The transaction is not in all, maybe it was removed in the meantime?
None => return self.next(),
};
let next = self.all.read().get(&best.transaction.hash).cloned();
let ready = match next {
Some(ready) => ready,
// The transaction is not in all, maybe it was removed in the meantime?
None => continue,
};
// Insert transactions that just got unlocked.
for hash in &ready.unlocks {
// first check local awaiting transactions
if let Some((mut satisfied, tx_ref)) = self.awaiting.remove(hash) {
satisfied += 1;
self.best_or_awaiting(satisfied, tx_ref);
// then get from the pool
} else if let Some(next) = self.all.get(hash) {
self.best_or_awaiting(next.requires_offset + 1, next.transaction.clone());
// Insert transactions that just got unlocked.
for hash in &ready.unlocks {
// first check local awaiting transactions
let res = if let Some((mut satisfied, tx_ref)) = self.awaiting.remove(hash) {
satisfied += 1;
Some((satisfied, tx_ref))
// then get from the pool
} else if let Some(next) = self.all.read().get(hash) {
Some((next.requires_offset + 1, next.transaction.clone()))
} else {
None
};
if let Some((satisfied, tx_ref)) = res {
self.best_or_awaiting(satisfied, tx_ref)
}
}
}
Some(best.transaction.clone())
return Some(best.transaction.clone())
}
}
}
+12 -12
View File
@@ -49,12 +49,12 @@ impl txpool::ChainApi for TestApi {
};
let provides = vec![vec![uxt.transfer.nonce as u8]];
Ok(TransactionValidity::Valid(
/* priority: */1,
Ok(TransactionValidity::Valid {
priority: 1,
requires,
provides,
/* longevity: */64
))
longevity: 64
})
}
fn block_id_to_number(&self, at: &BlockId<Self::Block>) -> error::Result<Option<txpool::NumberFor<Self>>> {
@@ -109,7 +109,7 @@ fn submission_should_work() {
assert_eq!(209, index(&BlockId::number(0)));
pool.submit_one(&BlockId::number(0), uxt(Alice, 209)).unwrap();
let pending: Vec<_> = pool.ready(|p| p.map(|a| a.data.transfer.nonce).collect());
let pending: Vec<_> = pool.ready().map(|a| a.data.transfer.nonce).collect();
assert_eq!(pending, vec![209]);
}
@@ -119,7 +119,7 @@ fn multiple_submission_should_work() {
pool.submit_one(&BlockId::number(0), uxt(Alice, 209)).unwrap();
pool.submit_one(&BlockId::number(0), uxt(Alice, 210)).unwrap();
let pending: Vec<_> = pool.ready(|p| p.map(|a| a.data.transfer.nonce).collect());
let pending: Vec<_> = pool.ready().map(|a| a.data.transfer.nonce).collect();
assert_eq!(pending, vec![209, 210]);
}
@@ -128,7 +128,7 @@ fn early_nonce_should_be_culled() {
let pool = pool();
pool.submit_one(&BlockId::number(0), uxt(Alice, 208)).unwrap();
let pending: Vec<_> = pool.ready(|p| p.map(|a| a.data.transfer.nonce).collect());
let pending: Vec<_> = pool.ready().map(|a| a.data.transfer.nonce).collect();
assert_eq!(pending, Vec::<Index>::new());
}
@@ -137,11 +137,11 @@ fn late_nonce_should_be_queued() {
let pool = pool();
pool.submit_one(&BlockId::number(0), uxt(Alice, 210)).unwrap();
let pending: Vec<_> = pool.ready(|p| p.map(|a| a.data.transfer.nonce).collect());
let pending: Vec<_> = pool.ready().map(|a| a.data.transfer.nonce).collect();
assert_eq!(pending, Vec::<Index>::new());
pool.submit_one(&BlockId::number(0), uxt(Alice, 209)).unwrap();
let pending: Vec<_> = pool.ready(|p| p.map(|a| a.data.transfer.nonce).collect());
let pending: Vec<_> = pool.ready().map(|a| a.data.transfer.nonce).collect();
assert_eq!(pending, vec![209, 210]);
}
@@ -151,12 +151,12 @@ fn prune_tags_should_work() {
pool.submit_one(&BlockId::number(0), uxt(Alice, 209)).unwrap();
pool.submit_one(&BlockId::number(0), uxt(Alice, 210)).unwrap();
let pending: Vec<_> = pool.ready(|p| p.map(|a| a.data.transfer.nonce).collect());
let pending: Vec<_> = pool.ready().map(|a| a.data.transfer.nonce).collect();
assert_eq!(pending, vec![209, 210]);
pool.prune_tags(&BlockId::number(1), vec![vec![209]]).unwrap();
let pending: Vec<_> = pool.ready(|p| p.map(|a| a.data.transfer.nonce).collect());
let pending: Vec<_> = pool.ready().map(|a| a.data.transfer.nonce).collect();
assert_eq!(pending, vec![210]);
}
@@ -169,7 +169,7 @@ fn should_ban_invalid_transactions() {
pool.submit_one(&BlockId::number(0), uxt.clone()).unwrap_err();
// when
let pending: Vec<_> = pool.ready(|p| p.map(|a| a.data.transfer.nonce).collect());
let pending: Vec<_> = pool.ready().map(|a| a.data.transfer.nonce).collect();
assert_eq!(pending, Vec::<Index>::new());
// then
+129 -3
View File
@@ -45,7 +45,7 @@ pub use trie_stream::TrieStream;
/// The Substrate format implementation of `NodeCodec`.
pub use node_codec::NodeCodec;
/// Various re-exports from the `trie-db` crate.
pub use trie_db::{Trie, TrieMut, DBValue, Recorder};
pub use trie_db::{Trie, TrieMut, DBValue, Recorder, Query};
/// As in `trie_db`, but less generic, error type for the crate.
pub type TrieError<H> = trie_db::TrieError<H, Error>;
@@ -53,7 +53,7 @@ pub type TrieError<H> = trie_db::TrieError<H, Error>;
pub trait AsHashDB<H: Hasher>: hash_db::AsHashDB<H, trie_db::DBValue> {}
impl<H: Hasher, T: hash_db::AsHashDB<H, trie_db::DBValue>> AsHashDB<H> for T {}
/// As in `hash_db`, but less generic, trait exposed.
pub type HashDB<H> = hash_db::HashDB<H, trie_db::DBValue>;
pub type HashDB<'a, H> = hash_db::HashDB<H, trie_db::DBValue> + 'a;
/// As in `memory_db`, but less generic, trait exposed.
pub type MemoryDB<H> = memory_db::MemoryDB<H, trie_db::DBValue>;
@@ -73,6 +73,36 @@ pub fn trie_root<H: Hasher, I, A, B>(input: I) -> H::Out where
trie_root::trie_root::<H, TrieStream, _, _, _>(input)
}
/// Determine a trie root given a hash DB and delta values.
pub fn delta_trie_root<H: Hasher, I, A, B>(db: &mut HashDB<H>, mut root: H::Out, delta: I) -> Result<H::Out, Box<TrieError<H::Out>>> where
I: IntoIterator<Item = (A, Option<B>)>,
A: AsRef<[u8]> + Ord,
B: AsRef<[u8]>,
{
{
let mut trie = TrieDBMut::<H>::from_existing(db, &mut root)?;
for (key, change) in delta {
match change {
Some(val) => trie.insert(key.as_ref(), val.as_ref())?,
None => trie.remove(key.as_ref())?, // TODO: archive mode
};
}
}
Ok(root)
}
/// Read a value from the trie.
pub fn read_trie_value<H: Hasher>(db: &HashDB<H>, root: &H::Out, key: &[u8]) -> Result<Option<Vec<u8>>, Box<TrieError<H::Out>>> {
Ok(TrieDB::<H>::new(db, root)?.get(key).map(|x| x.map(|val| val.to_vec()))?)
}
/// Read a value from the trie with given Query.
pub fn read_trie_value_with<H: Hasher, Q: Query<H, Item=DBValue>>(db: &HashDB<H>, root: &H::Out, key: &[u8], query: Q) -> Result<Option<Vec<u8>>, Box<TrieError<H::Out>>> {
Ok(TrieDB::<H>::new(db, root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?)
}
/// Determine a trie root node's data given its ordered contents, closed form.
pub fn unhashed_trie<H: Hasher, I, A, B>(input: I) -> Vec<u8> where
I: IntoIterator<Item = (A, B)>,
@@ -95,6 +125,102 @@ where
)
}
/// Determine whether a child trie key is valid. `child_trie_root` and `child_delta_trie_root` can panic if invalid value is provided to them.
pub fn is_child_trie_key_valid<H: Hasher>(_storage_key: &[u8]) -> bool {
true
}
/// Determine the default child trie root.
pub fn default_child_trie_root<H: Hasher>(_storage_key: &[u8]) -> Vec<u8> {
let mut db = MemoryDB::default();
let mut root = H::Out::default();
let mut empty = TrieDBMut::<H>::new(&mut db, &mut root);
empty.commit();
empty.root().as_ref().to_vec()
}
/// Determine a child trie root given its ordered contents, closed form. H is the default hasher, but a generic
/// implementation may ignore this type parameter and use other hashers.
pub fn child_trie_root<H: Hasher, I, A, B>(_storage_key: &[u8], input: I) -> Vec<u8> where
I: IntoIterator<Item = (A, B)>,
A: AsRef<[u8]> + Ord,
B: AsRef<[u8]>,
{
trie_root::<H, _, _, _>(input).as_ref().iter().cloned().collect()
}
/// Determine a child trie root given a hash DB and delta values. H is the default hasher, but a generic implementation may ignore this type parameter and use other hashers.
pub fn child_delta_trie_root<H: Hasher, I, A, B>(_storage_key: &[u8], db: &mut HashDB<H>, root_vec: Vec<u8>, delta: I) -> Result<Vec<u8>, Box<TrieError<H::Out>>> where
I: IntoIterator<Item = (A, Option<B>)>,
A: AsRef<[u8]> + Ord,
B: AsRef<[u8]>,
{
let mut root = H::Out::default();
root.as_mut().copy_from_slice(&root_vec); // root is fetched from DB, not writable by runtime, so it's always valid.
{
let mut trie = TrieDBMut::<H>::from_existing(db, &mut root)?;
for (key, change) in delta {
match change {
Some(val) => trie.insert(key.as_ref(), val.as_ref())?,
None => trie.remove(key.as_ref())?, // TODO: archive mode
};
}
}
Ok(root.as_ref().to_vec())
}
/// Call `f` for all keys in a child trie.
pub fn for_keys_in_child_trie<H: Hasher, F: FnMut(&[u8])>(_storage_key: &[u8], db: &HashDB<H>, root_slice: &[u8], mut f: F) -> Result<(), Box<TrieError<H::Out>>> {
let mut root = H::Out::default();
root.as_mut().copy_from_slice(root_slice); // root is fetched from DB, not writable by runtime, so it's always valid.
let trie = TrieDB::<H>::new(db, &root)?;
let iter = trie.iter()?;
for x in iter {
let (key, _) = x?;
f(&key);
}
Ok(())
}
/// Record all keys for a given root.
pub fn record_all_keys<H: Hasher>(db: &HashDB<H>, root: &H::Out, recorder: &mut Recorder<H::Out>) -> Result<(), Box<TrieError<H::Out>>> {
let trie = TrieDB::<H>::new(db, root)?;
let iter = trie.iter()?;
for x in iter {
let (key, _) = x?;
// there's currently no API like iter_with()
// => use iter to enumerate all keys AND lookup each
// key using get_with
trie.get_with(&key, &mut *recorder)?;
}
Ok(())
}
/// Read a value from the child trie.
pub fn read_child_trie_value<H: Hasher>(_storage_key: &[u8], db: &HashDB<H>, root_slice: &[u8], key: &[u8]) -> Result<Option<Vec<u8>>, Box<TrieError<H::Out>>> {
let mut root = H::Out::default();
root.as_mut().copy_from_slice(root_slice); // root is fetched from DB, not writable by runtime, so it's always valid.
Ok(TrieDB::<H>::new(db, &root)?.get(key).map(|x| x.map(|val| val.to_vec()))?)
}
/// Read a value from the child trie with given query.
pub fn read_child_trie_value_with<H: Hasher, Q: Query<H, Item=DBValue>>(_storage_key: &[u8], db: &HashDB<H>, root_slice: &[u8], key: &[u8], query: Q) -> Result<Option<Vec<u8>>, Box<TrieError<H::Out>>> {
let mut root = H::Out::default();
root.as_mut().copy_from_slice(root_slice); // root is fetched from DB, not writable by runtime, so it's always valid.
Ok(TrieDB::<H>::new(db, &root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?)
}
// Utilities (not exported):
const EMPTY_TRIE: u8 = 0;
@@ -418,4 +544,4 @@ mod tests {
assert_eq!(pairs, iter_pairs);
}
}
}
+13 -1
View File
@@ -9,4 +9,16 @@ log = "0.4"
tokio = "0.1.7"
exit-future = "0.1"
substrate-cli = { path = "../../core/cli" }
node-service = { path = "../service" }
substrate-primitives = { path = "../../core/primitives" }
node-runtime = { path = "../runtime" }
node-primitives = { path = "../primitives" }
node-network = { path = "../network" }
hex-literal = "0.1"
substrate-service = { path = "../../core/service" }
substrate-transaction-pool = { path = "../../core/transaction-pool" }
substrate-network = { path = "../../core/network" }
sr-primitives = { path = "../../core/sr-primitives" }
node-executor = { path = "../executor" }
[dev-dependencies]
substrate-service-test = { path = "../../core/service/test" }
@@ -20,11 +20,14 @@ use primitives::{AuthorityId, ed25519};
use node_runtime::{GenesisConfig, ConsensusConfig, CouncilSeatsConfig, CouncilVotingConfig, DemocracyConfig,
SessionConfig, StakingConfig, TimestampConfig, BalancesConfig, TreasuryConfig,
ContractConfig, Permill, Perbill};
use service::ChainSpec;
use substrate_service;
const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/";
pub fn bbq_birch_config() -> Result<ChainSpec<GenesisConfig>, String> {
/// Specialised `ChainSpec`.
pub type ChainSpec = substrate_service::ChainSpec<GenesisConfig>;
pub fn bbq_birch_config() -> Result<ChainSpec, String> {
ChainSpec::from_embedded(include_bytes!("../res/bbq-birch.json"))
}
@@ -122,7 +125,7 @@ fn staging_testnet_config_genesis() -> GenesisConfig {
}
/// Staging testnet config.
pub fn staging_testnet_config() -> ChainSpec<GenesisConfig> {
pub fn staging_testnet_config() -> ChainSpec {
let boot_nodes = vec![
];
ChainSpec::from_genesis(
@@ -227,7 +230,7 @@ fn development_config_genesis() -> GenesisConfig {
}
/// Development config (single validator Alice)
pub fn development_config() -> ChainSpec<GenesisConfig> {
pub fn development_config() -> ChainSpec {
ChainSpec::from_genesis("Development", "development", development_config_genesis, vec![], None, None, None)
}
@@ -238,18 +241,30 @@ fn local_testnet_genesis() -> GenesisConfig {
])
}
fn local_testnet_genesis_instant() -> GenesisConfig {
let mut genesis = local_testnet_genesis();
genesis.timestamp = Some(TimestampConfig { period: 0 });
genesis
}
/// Local testnet config (multivalidator Alice + Bob)
pub fn local_testnet_config() -> ChainSpec<GenesisConfig> {
pub fn local_testnet_config() -> ChainSpec {
ChainSpec::from_genesis("Local Testnet", "local_testnet", local_testnet_genesis, vec![], None, None, None)
}
/// Local testnet config (multivalidator Alice + Bob)
pub fn integration_test_config() -> ChainSpec<GenesisConfig> {
ChainSpec::from_genesis("Integration Test", "test", local_testnet_genesis_instant, vec![], None, None, None)
#[cfg(test)]
mod tests {
use super::*;
use service_test;
use service::Factory;
fn local_testnet_genesis_instant() -> GenesisConfig {
let mut genesis = local_testnet_genesis();
genesis.timestamp = Some(TimestampConfig { period: 0 });
genesis
}
/// Local testnet config (multivalidator Alice + Bob)
pub fn integration_test_config() -> ChainSpec {
ChainSpec::from_genesis("Integration Test", "test", local_testnet_genesis_instant, vec![], None, None, None)
}
#[test]
fn test_connectivity() {
service_test::connectivity::<Factory>(integration_test_config());
}
}
+25 -10
View File
@@ -22,17 +22,32 @@
extern crate tokio;
extern crate substrate_cli as cli;
extern crate node_service as service;
extern crate substrate_primitives as primitives;
extern crate node_runtime;
extern crate exit_future;
#[macro_use]
extern crate hex_literal;
#[cfg(test)]
extern crate substrate_service_test as service_test;
extern crate substrate_transaction_pool as transaction_pool;
extern crate substrate_network as network;
extern crate node_network;
extern crate sr_primitives as runtime_primitives;
extern crate node_primitives;
#[macro_use]
extern crate substrate_service;
extern crate node_executor;
#[macro_use]
extern crate log;
pub use cli::error;
mod chain_spec;
mod service;
use tokio::runtime::Runtime;
pub use service::{Components as ServiceComponents, Service, CustomConfiguration, ServiceFactory};
pub use cli::{VersionInfo, IntoExit};
use substrate_service::{ServiceFactory, Roles as ServiceRoles};
/// The chain specification option.
#[derive(Clone, Debug)]
@@ -49,12 +64,12 @@ pub enum ChainSpec {
/// Get a chain config from a spec setting.
impl ChainSpec {
pub(crate) fn load(self) -> Result<service::ChainSpec, String> {
pub(crate) fn load(self) -> Result<chain_spec::ChainSpec, String> {
Ok(match self {
ChainSpec::BbqBirch => service::chain_spec::bbq_birch_config()?,
ChainSpec::Development => service::chain_spec::development_config(),
ChainSpec::LocalTestnet => service::chain_spec::local_testnet_config(),
ChainSpec::StagingTestnet => service::chain_spec::staging_testnet_config(),
ChainSpec::BbqBirch => chain_spec::bbq_birch_config()?,
ChainSpec::Development => chain_spec::development_config(),
ChainSpec::LocalTestnet => chain_spec::local_testnet_config(),
ChainSpec::StagingTestnet => chain_spec::staging_testnet_config(),
})
}
@@ -69,7 +84,7 @@ impl ChainSpec {
}
}
fn load_spec(id: &str) -> Result<Option<service::ChainSpec>, String> {
fn load_spec(id: &str) -> Result<Option<chain_spec::ChainSpec>, String> {
Ok(match ChainSpec::from(id) {
Some(spec) => Some(spec.load()?),
None => None,
@@ -93,7 +108,7 @@ pub fn run<I, T, E>(args: I, exit: E, version: cli::VersionInfo) -> error::Resul
info!("Roles: {:?}", config.roles);
let mut runtime = Runtime::new()?;
let executor = runtime.executor();
match config.roles == service::Roles::LIGHT {
match config.roles == ServiceRoles::LIGHT {
true => run_until_exit(&mut runtime, service::Factory::new_light(config, executor)?, exit)?,
false => run_until_exit(&mut runtime, service::Factory::new_full(config, executor)?, exit)?,
}
@@ -108,7 +123,7 @@ fn run_until_exit<C, E>(
e: E,
) -> error::Result<()>
where
C: service::Components,
C: substrate_service::Components,
E: IntoExit,
{
let (exit_send, exit) = exit_future::signal();
+133
View File
@@ -0,0 +1,133 @@
// Copyright 2018 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
#![warn(unused_extern_crates)]
//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
use std::sync::Arc;
use transaction_pool::{self, txpool::{Pool as TransactionPool}};
use node_primitives::Block;
use node_runtime::GenesisConfig;
use node_network::Protocol as NodeProtocol;
use substrate_service::{
FactoryFullConfiguration, LightComponents, FullComponents, FullBackend,
LightBackend, FullExecutor, LightExecutor
};
use network::import_queue::{BasicQueue, BlockOrigin, ImportBlock, Verifier};
use runtime_primitives::{traits::Block as BlockT};
use primitives::AuthorityId;
use node_executor;
// TODO: Remove me, when we have a functional consensus.
/// A verifier that doesn't actually do any checks
pub struct NoneVerifier;
/// This Verifiyer accepts all data as valid
impl<B: BlockT> Verifier<B> for NoneVerifier {
fn verify(
&self,
origin: BlockOrigin,
header: B::Header,
justification: Vec<u8>,
body: Option<Vec<B::Extrinsic>>
) -> Result<(ImportBlock<B>, Option<Vec<AuthorityId>>), String> {
Ok((ImportBlock {
origin,
header,
body,
finalized: true,
external_justification: justification,
internal_justification: vec![],
auxiliary: Vec::new(),
}, None))
}
}
construct_simple_service!(Service);
construct_service_factory! {
struct Factory {
Block = Block,
NetworkProtocol = NodeProtocol { |config| Ok(NodeProtocol::new()) },
RuntimeDispatch = node_executor::Executor,
FullTransactionPoolApi = transaction_pool::ChainApi<FullBackend<Self>, FullExecutor<Self>, Block>
{ |config, client| Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) },
LightTransactionPoolApi = transaction_pool::ChainApi<LightBackend<Self>, LightExecutor<Self>, Block>
{ |config, client| Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client))) },
Genesis = GenesisConfig,
Configuration = (),
FullService = Service<FullComponents<Self>>
{ |config, executor| Service::<FullComponents<Factory>>::new(config, executor) },
LightService = Service<LightComponents<Self>>
{ |config, executor| Service::<LightComponents<Factory>>::new(config, executor) },
ImportQueue = BasicQueue<Block, NoneVerifier>
{ |_, _| Ok(BasicQueue::new(Arc::new(NoneVerifier {}))) }
{ |_, _| Ok(BasicQueue::new(Arc::new(NoneVerifier {}))) },
}
}
#[cfg(test)]
mod tests {
#[cfg(feature = "rhd")]
fn test_sync() {
use {service_test, Factory};
use client::{ImportBlock, BlockOrigin};
let alice: Arc<ed25519::Pair> = Arc::new(Keyring::Alice.into());
let bob: Arc<ed25519::Pair> = Arc::new(Keyring::Bob.into());
let validators = vec![alice.public().0.into(), bob.public().0.into()];
let keys: Vec<&ed25519::Pair> = vec![&*alice, &*bob];
let offline = Arc::new(RwLock::new(OfflineTracker::new()));
let dummy_runtime = ::tokio::runtime::Runtime::new().unwrap();
let block_factory = |service: &<Factory as service::ServiceFactory>::FullService| {
let block_id = BlockId::number(service.client().info().unwrap().chain.best_number);
let parent_header = service.client().header(&block_id).unwrap().unwrap();
let consensus_net = ConsensusNetwork::new(service.network(), service.client().clone());
let proposer_factory = consensus::ProposerFactory {
client: service.client().clone(),
transaction_pool: service.transaction_pool().clone(),
network: consensus_net,
offline: offline.clone(),
force_delay: 0,
handle: dummy_runtime.executor(),
};
let (proposer, _, _) = proposer_factory.init(&parent_header, &validators, alice.clone()).unwrap();
let block = proposer.propose().expect("Error making test block");
ImportBlock {
origin: BlockOrigin::File,
external_justification: Vec::new(),
internal_justification: Vec::new(),
finalized: true,
body: Some(block.extrinsics),
header: block.header,
auxiliary: Vec::new(),
}
};
let extrinsic_factory = |service: &<Factory as service::ServiceFactory>::FullService| {
let payload = (0, Call::Balances(BalancesCall::transfer(RawAddress::Id(bob.public().0.into()), 69.into())), Era::immortal(), service.client().genesis_hash());
let signature = alice.sign(&payload.encode()).into();
let id = alice.public().0.into();
let xt = UncheckedExtrinsic {
signature: Some((RawAddress::Id(id), signature, payload.0, Era::immortal())),
function: payload.1,
}.encode();
let v: Vec<u8> = Decode::decode(&mut xt.as_slice()).unwrap();
OpaqueExtrinsic(v)
};
service_test::sync::<Factory, _, _>(chain_spec::integration_test_config(), block_factory, extrinsic_factory);
}
}
+6 -7
View File
@@ -48,10 +48,10 @@ use std::sync::Arc;
use std::time::{self, Duration, Instant};
use client::{Client as SubstrateClient, CallExecutor};
use client::runtime_api::{Core, BlockBuilder as BlockBuilderAPI, Miscellaneous, OldTxQueue, BlockBuilderError};
use client::runtime_api::{Core, BlockBuilder as BlockBuilderAPI, Miscellaneous, OldTxQueue};
use codec::{Decode, Encode};
use node_primitives::{AccountId, Timestamp, SessionKey, InherentData};
use node_runtime::Runtime;
use node_primitives::{AccountId, Timestamp, SessionKey};
use node_runtime::{Runtime, InherentError, TimestampInherentError, InherentData};
use primitives::{AuthorityId, ed25519, Blake2Hasher};
use runtime_primitives::traits::{Block as BlockT, Hash as HashT, Header as HeaderT, As, BlockNumberToHash};
use runtime_primitives::generic::{BlockId, Era};
@@ -135,9 +135,8 @@ impl<'a, B, E, Block> AuthoringApi for SubstrateClient<B, E, Block> where
let mut block_builder = self.new_block_at(at)?;
if runtime_version.has_api(*b"inherent", 1) {
for inherent in self.inherent_extrinsics(at, &inherent_data)? {
block_builder.push(inherent)?;
}
self.inherent_extrinsics(at, &inherent_data)?
.into_iter().try_for_each(|i| block_builder.push(i))?;
}
build_ctx(&mut block_builder);
@@ -383,7 +382,7 @@ impl<C, A> bft::Proposer<<C as AuthoringApi>::Block> for Proposer<C, A> where
&inherent
) {
Ok(Ok(())) => None,
Ok(Err(BlockBuilderError::TimestampInFuture(timestamp))) => Some(timestamp),
Ok(Err(InherentError::Timestamp(TimestampInherentError::TimestampInFuture(timestamp)))) => Some(timestamp),
Ok(Err(e)) => {
debug!(target: "bft", "Invalid proposal (check_inherents): {:?}", e);
return Box::new(future::ok(false));
+4 -17
View File
@@ -36,7 +36,7 @@ use rstd::prelude::*;
use runtime_primitives::generic;
#[cfg(feature = "std")]
use primitives::bytes;
use runtime_primitives::traits::BlakeTwo256;
use runtime_primitives::traits::{BlakeTwo256, self};
/// An index to a block.
pub type BlockNumber = u64;
@@ -79,22 +79,9 @@ pub type BlockId = generic::BlockId<Block>;
#[derive(PartialEq, Eq, Clone, Default, Encode, Decode)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug))]
pub struct UncheckedExtrinsic(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec<u8>);
///
/// Inherent data to include in a block.
#[derive(Encode, Decode)]
pub struct InherentData {
/// Current timestamp.
pub timestamp: Timestamp,
/// Indices of offline validators.
pub offline_indices: Vec<u32>,
}
impl InherentData {
/// Create a new `InherentData` instance.
pub fn new(timestamp: Timestamp, offline_indices: Vec<u32>) -> Self {
Self {
timestamp,
offline_indices
}
impl traits::Extrinsic for UncheckedExtrinsic {
fn is_signed(&self) -> Option<bool> {
None
}
}
+15 -70
View File
@@ -40,7 +40,6 @@ extern crate substrate_primitives;
#[macro_use]
extern crate parity_codec_derive;
#[cfg_attr(not(feature = "std"), macro_use)]
extern crate sr_std as rstd;
extern crate srml_balances as balances;
extern crate srml_consensus as consensus;
@@ -61,13 +60,13 @@ use rstd::prelude::*;
use substrate_primitives::u32_trait::{_2, _4};
use node_primitives::{
AccountId, AccountIndex, Balance, BlockNumber, Hash, Index,
SessionKey, Signature, InherentData, Timestamp as TimestampType
SessionKey, Signature
};
use runtime_api::{BlockBuilderError, runtime::*};
use runtime_api::runtime::*;
use runtime_primitives::ApplyResult;
use runtime_primitives::transaction_validity::TransactionValidity;
use runtime_primitives::generic;
use runtime_primitives::traits::{Convert, BlakeTwo256, DigestItem, Block as BlockT};
use runtime_primitives::traits::{Convert, BlakeTwo256, Block as BlockT};
use version::{RuntimeVersion, ApiId};
use council::{motions as council_motions, voting as council_voting};
#[cfg(feature = "std")]
@@ -192,40 +191,24 @@ impl contract::Trait for Runtime {
type Event = Event;
}
impl DigestItem for Log {
type Hash = Hash;
type AuthorityId = SessionKey;
fn as_authorities_change(&self) -> Option<&[Self::AuthorityId]> {
match self.0 {
InternalLog::consensus(ref item) => item.as_authorities_change(),
_ => None,
}
}
fn as_changes_trie_root(&self) -> Option<&Self::Hash> {
match self.0 {
InternalLog::system(ref item) => item.as_changes_trie_root(),
_ => None,
}
}
}
construct_runtime!(
pub enum Runtime with Log(InternalLog: DigestItem<Hash, SessionKey>) {
pub enum Runtime with Log(InternalLog: DigestItem<Hash, SessionKey>) where
Block = Block,
UncheckedExtrinsic = UncheckedExtrinsic
{
System: system::{default, Log(ChangesTrieRoot)},
Consensus: consensus::{Module, Call, Storage, Config, Log(AuthoritiesChange)},
Consensus: consensus::{Module, Call, Storage, Config<T>, Log(AuthoritiesChange), Inherent},
Balances: balances,
Timestamp: timestamp::{Module, Call, Storage, Config},
Timestamp: timestamp::{Module, Call, Storage, Config<T>, Inherent},
Session: session,
Staking: staking,
Democracy: democracy,
Council: council::{Module, Call, Storage, Event<T>},
CouncilVoting: council_voting,
CouncilMotions: council_motions::{Module, Call, Storage, Event<T>, Origin},
CouncilSeats: council_seats::{Config},
CouncilSeats: council_seats::{Config<T>},
Treasury: treasury,
Contract: contract::{Module, Call, Config, Event<T>},
Contract: contract::{Module, Call, Config<T>, Event<T>},
}
);
@@ -269,7 +252,7 @@ impl_apis! {
}
}
impl BlockBuilder<Block, InherentData, UncheckedExtrinsic, InherentData> for Runtime {
impl BlockBuilder<Block, InherentData, UncheckedExtrinsic, InherentData, InherentError> for Runtime {
fn initialise_block(header: <Block as BlockT>::Header) {
Executive::initialise_block(&header)
}
@@ -283,49 +266,11 @@ impl_apis! {
}
fn inherent_extrinsics(data: InherentData) -> Vec<UncheckedExtrinsic> {
let mut inherent = vec![generic::UncheckedMortalExtrinsic::new_unsigned(
Call::Timestamp(TimestampCall::set(data.timestamp.into()))
)];
if !data.offline_indices.is_empty() {
inherent.push(generic::UncheckedMortalExtrinsic::new_unsigned(
Call::Consensus(ConsensusCall::note_offline(data.offline_indices))
));
}
inherent
data.create_inherent_extrinsics()
}
fn check_inherents(block: Block, data: InherentData) -> Result<(), BlockBuilderError> {
// TODO: v1: should be automatically gathered
// Timestamp module...
const MAX_TIMESTAMP_DRIFT: TimestampType = 60;
let xt = block.extrinsics.get(TIMESTAMP_SET_POSITION as usize)
.ok_or_else(|| BlockBuilderError::Generic("No valid timestamp inherent in block".into()))?;
let t = match (xt.is_signed(), &xt.function) {
(false, Call::Timestamp(TimestampCall::set(t))) => t,
_ => return Err(BlockBuilderError::Generic("No valid timestamp inherent in block".into())),
};
let t = (*t).into();
if t > data.timestamp + MAX_TIMESTAMP_DRIFT {
return Err(BlockBuilderError::TimestampInFuture(t))
}
// Offline indices
let noted_offline =
block.extrinsics.get(NOTE_OFFLINE_POSITION as usize).and_then(|xt| match xt.function {
Call::Consensus(ConsensusCall::note_offline(ref x)) => Some(&x[..]),
_ => None,
}).unwrap_or(&[]);
noted_offline.iter().try_for_each(|n|
if !data.offline_indices.contains(n) {
Err(BlockBuilderError::Generic("Online node marked offline".into()))
} else {
Ok(())
}
)
fn check_inherents(block: Block, data: InherentData) -> Result<(), InherentError> {
data.check_inherents(block)
}
fn random_seed() -> <Block as BlockT>::Hash {
+2 -2
View File
@@ -530,7 +530,6 @@ name = "sr-api"
version = "0.1.0"
dependencies = [
"parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-codec-derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"sr-primitives 0.1.0",
"sr-std 0.1.0",
"sr-version 0.1.0",
@@ -563,7 +562,6 @@ dependencies = [
"serde_derive 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)",
"sr-io 0.1.0",
"sr-std 0.1.0",
"sr-version 0.1.0",
"substrate-primitives 0.1.0",
]
@@ -593,6 +591,7 @@ dependencies = [
"parity-codec-derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)",
"sr-primitives 0.1.0",
"sr-std 0.1.0",
]
@@ -788,6 +787,7 @@ version = "0.1.0"
dependencies = [
"hex-literal 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-codec 2.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-codec-derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.79 (registry+https://github.com/rust-lang/crates.io-index)",
"sr-io 0.1.0",
-33
View File
@@ -1,33 +0,0 @@
[package]
name = "node-service"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
error-chain = "0.12"
hex-literal = "0.1"
lazy_static = "1.0"
log = "0.4"
node-consensus = { path = "../consensus" }
node-executor = { path = "../executor" }
node-network = { path = "../network" }
node-primitives = { path = "../primitives" }
node-runtime = { path = "../runtime" }
parity-codec = { version = "2.1" }
parking_lot = "0.4"
slog = "^2"
sr-io = { path = "../../core/sr-io" }
sr-primitives = { path = "../../core/sr-primitives" }
substrate-client = { path = "../../core/client" }
substrate-network = { path = "../../core/network" }
substrate-primitives = { path = "../../core/primitives" }
substrate-service = { path = "../../core/service" }
substrate-telemetry = { path = "../../core/telemetry" }
substrate-transaction-pool = { path = "../../core/transaction-pool" }
tokio = "0.1.7"
[dev-dependencies]
substrate-service-test = { path = "../../core/service/test" }
substrate-test-client = { path = "../../core/test-client" }
substrate-keyring = { path = "../../core/keyring" }
rhododendron = "0.3"
-265
View File
@@ -1,265 +0,0 @@
// Copyright 2018 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
#![warn(unused_extern_crates)]
//! Substrate service. Specialized wrapper over substrate service.
extern crate node_primitives;
extern crate node_runtime;
extern crate node_executor;
extern crate node_network;
extern crate substrate_client as client;
extern crate substrate_network as network;
extern crate substrate_primitives as primitives;
extern crate substrate_service as service;
extern crate substrate_transaction_pool as transaction_pool;
extern crate tokio;
#[cfg(test)]
extern crate substrate_service_test as service_test;
#[macro_use]
extern crate hex_literal;
#[cfg(all(test, feature="rhd"))]
extern crate rhododendron as rhd;
extern crate sr_primitives as runtime_primitives;
pub mod chain_spec;
use std::sync::Arc;
use transaction_pool::txpool::{Pool as TransactionPool};
use node_primitives::{Block, Hash};
use node_runtime::GenesisConfig;
use client::Client;
use node_network::Protocol as DemoProtocol;
use tokio::runtime::TaskExecutor;
use service::FactoryFullConfiguration;
use network::import_queue::{BasicQueue, BlockOrigin, ImportBlock, Verifier};
use runtime_primitives::{traits::Block as BlockT};
use primitives::{Blake2Hasher, AuthorityId};
pub use service::{Roles, PruningMode, TransactionPoolOptions, ServiceFactory,
ErrorKind, Error, ComponentBlock, LightComponents, FullComponents};
pub use client::ExecutionStrategy;
/// Specialised `ChainSpec`.
pub type ChainSpec = service::ChainSpec<GenesisConfig>;
/// Client type for specialised `Components`.
pub type ComponentClient<C> = Client<<C as Components>::Backend, <C as Components>::Executor, Block>;
pub type NetworkService = network::Service<Block, <Factory as service::ServiceFactory>::NetworkProtocol, Hash>;
/// A verifier that doesn't actually do any checks
pub struct NoneVerifier;
/// This Verifiyer accepts all data as valid
impl<B: BlockT> Verifier<B> for NoneVerifier {
fn verify(
&self,
origin: BlockOrigin,
header: B::Header,
justification: Vec<u8>,
body: Option<Vec<B::Extrinsic>>
) -> Result<(ImportBlock<B>, Option<Vec<AuthorityId>>), String> {
Ok((ImportBlock {
origin,
header,
body,
finalized: true,
external_justification: justification,
internal_justification: vec![],
auxiliary: Vec::new(),
}, None))
}
}
/// A collection of type to generalise specific components over full / light client.
pub trait Components: service::Components {
/// Demo API.
type Api: 'static + Send + Sync;
/// Client backend.
type Backend: 'static + client::backend::Backend<Block, Blake2Hasher>;
/// Client executor.
type Executor: 'static + client::CallExecutor<Block, Blake2Hasher> + Send + Sync;
}
impl Components for service::LightComponents<Factory> {
type Api = service::LightClient<Factory>;
type Executor = service::LightExecutor<Factory>;
type Backend = service::LightBackend<Factory>;
}
impl Components for service::FullComponents<Factory> {
type Api = service::FullClient<Factory>;
type Executor = service::FullExecutor<Factory>;
type Backend = service::FullBackend<Factory>;
}
/// All configuration for the node.
pub type Configuration = FactoryFullConfiguration<Factory>;
/// Demo-specific configuration.
#[derive(Default)]
pub struct CustomConfiguration;
/// Config for the substrate service.
pub struct Factory;
impl service::ServiceFactory for Factory {
type Block = Block;
type ExtrinsicHash = Hash;
type NetworkProtocol = DemoProtocol;
type RuntimeDispatch = node_executor::Executor;
type FullTransactionPoolApi = transaction_pool::ChainApi<service::FullBackend<Self>, service::FullExecutor<Self>, Block>;
type LightTransactionPoolApi = transaction_pool::ChainApi<service::LightBackend<Self>, service::LightExecutor<Self>, Block>;
type Genesis = GenesisConfig;
type Configuration = CustomConfiguration;
type FullService = Service<service::FullComponents<Self>>;
type LightService = Service<service::LightComponents<Self>>;
/// instance of import queue for clients
type ImportQueue = BasicQueue<Block, NoneVerifier>;
fn build_full_transaction_pool(config: TransactionPoolOptions, client: Arc<service::FullClient<Self>>)
-> Result<TransactionPool<Self::FullTransactionPoolApi>, Error>
{
Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client)))
}
fn build_light_transaction_pool(config: TransactionPoolOptions, client: Arc<service::LightClient<Self>>)
-> Result<TransactionPool<Self::LightTransactionPoolApi>, Error>
{
Ok(TransactionPool::new(config, transaction_pool::ChainApi::new(client)))
}
fn build_network_protocol(_config: &Configuration)
-> Result<DemoProtocol, Error>
{
Ok(DemoProtocol::new())
}
fn build_full_import_queue(
_config: &FactoryFullConfiguration<Self>,
_client: Arc<service::FullClient<Self>>,
) -> Result<BasicQueue<Block, NoneVerifier>, service::Error> {
Ok(BasicQueue::new(Arc::new(NoneVerifier {})))
}
fn build_light_import_queue(
_config: &FactoryFullConfiguration<Self>,
_client: Arc<service::LightClient<Self>>,
) -> Result<BasicQueue<Block, NoneVerifier>, service::Error> {
Ok(BasicQueue::new(Arc::new(NoneVerifier {})))
}
fn new_light(config: Configuration, executor: TaskExecutor)
-> Result<Service<LightComponents<Factory>>, Error>
{
let service = service::Service::<LightComponents<Factory>>::new(config, executor.clone())?;
Ok(Service {
inner: service,
_consensus: None,
})
}
fn new_full(config: Configuration, executor: TaskExecutor)
-> Result<Service<FullComponents<Factory>>, Error>
{
let service = service::Service::<FullComponents<Factory>>::new(config, executor.clone())?;
// FIXME: Spin consensus service if configured
let consensus = None;
Ok(Service {
inner: service,
_consensus: consensus,
})
}
}
/// Demo service.
pub struct Service<C: Components> {
inner: service::Service<C>,
_consensus: Option<bool>, // FIXME: add actual consensus engine
}
impl<C: Components> ::std::ops::Deref for Service<C> {
type Target = service::Service<C>;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
/// Creates bare client without any networking.
pub fn new_client(config: Configuration)
-> Result<Arc<service::ComponentClient<FullComponents<Factory>>>, Error>
{
service::new_client::<Factory>(&config)
}
#[cfg(test)]
mod tests {
use {service_test, Factory, chain_spec};
#[test]
fn test_connectivity() {
service_test::connectivity::<Factory>(chain_spec::integration_test_config());
}
#[test]
#[cfg(feature = "rhd")]
fn test_sync() {
use client::{ImportBlock, BlockOrigin};
let alice: Arc<ed25519::Pair> = Arc::new(Keyring::Alice.into());
let bob: Arc<ed25519::Pair> = Arc::new(Keyring::Bob.into());
let validators = vec![alice.public().0.into(), bob.public().0.into()];
let keys: Vec<&ed25519::Pair> = vec![&*alice, &*bob];
let offline = Arc::new(RwLock::new(OfflineTracker::new()));
let dummy_runtime = ::tokio::runtime::Runtime::new().unwrap();
let block_factory = |service: &<Factory as service::ServiceFactory>::FullService| {
let block_id = BlockId::number(service.client().info().unwrap().chain.best_number);
let parent_header = service.client().header(&block_id).unwrap().unwrap();
let consensus_net = ConsensusNetwork::new(service.network(), service.client().clone());
let proposer_factory = consensus::ProposerFactory {
client: service.client().clone(),
transaction_pool: service.transaction_pool().clone(),
network: consensus_net,
offline: offline.clone(),
force_delay: 0,
handle: dummy_runtime.executor(),
};
let (proposer, _, _) = proposer_factory.init(&parent_header, &validators, alice.clone()).unwrap();
let block = proposer.propose().expect("Error making test block");
ImportBlock {
origin: BlockOrigin::File,
external_justification: Vec::new(),
internal_justification: Vec::new(),
finalized: true,
body: Some(block.extrinsics),
header: block.header,
auxiliary: Vec::new(),
}
};
let extrinsic_factory = |service: &<Factory as service::ServiceFactory>::FullService| {
let payload = (0, Call::Balances(BalancesCall::transfer(RawAddress::Id(bob.public().0.into()), 69.into())), Era::immortal(), service.client().genesis_hash());
let signature = alice.sign(&payload.encode()).into();
let id = alice.public().0.into();
let xt = UncheckedExtrinsic {
signature: Some((RawAddress::Id(id), signature, payload.0, Era::immortal())),
function: payload.1,
}.encode();
let v: Vec<u8> = Decode::decode(&mut xt.as_slice()).unwrap();
OpaqueExtrinsic(v)
};
service_test::sync::<Factory, _, _>(chain_spec::integration_test_config(), block_factory, extrinsic_factory);
}
}
+6
View File
@@ -0,0 +1,6 @@
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
brew install openssl cmake
curl https://sh.rustup.rs -sSf | sh
source ~/.cargo/env
cargo install --git https://github.com/paritytech/substrate subkey
cargo install --git https://github.com/paritytech/substrate substrate
+32
View File
@@ -0,0 +1,32 @@
[package]
name = "srml-assets"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
hex-literal = "0.1.0"
serde = { version = "1.0", default-features = false }
serde_derive = { version = "1.0", optional = true }
parity-codec = { version = "2.1", default-features = false }
parity-codec-derive = { version = "2.1", default-features = false }
substrate-primitives = { path = "../../core/primitives", default-features = false }
sr-std = { path = "../../core/sr-std", default-features = false }
sr-io = { path = "../../core/sr-io", default-features = false }
sr-primitives = { path = "../../core/sr-primitives", default-features = false }
srml-support = { path = "../support", default-features = false }
srml-system = { path = "../system", default-features = false }
[features]
default = ["std"]
std = [
"serde/std",
"serde_derive",
"parity-codec/std",
"parity-codec-derive/std",
"substrate-primitives/std",
"sr-std/std",
"sr-io/std",
"sr-primitives/std",
"srml-support/std",
"srml-system/std",
]
+223
View File
@@ -0,0 +1,223 @@
// Copyright 2017-2018 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! A simple, secure module for dealing with fungible assets.
// Ensure we're `no_std` when compiling for Wasm.
#![cfg_attr(not(feature = "std"), no_std)]
// Assert macros used in tests.
extern crate sr_std;
// Needed for tests (`with_externalities`).
#[cfg(test)]
extern crate sr_io as runtime_io;
// Needed for the set of mock primitives used in our tests.
#[cfg(test)]
extern crate substrate_primitives;
// Needed for deriving `Serialize` and `Deserialize` for various types.
// We only implement the serde traits for std builds - they're unneeded
// in the wasm runtime.
#[cfg(feature = "std")]
#[macro_use]
extern crate serde_derive;
// Needed for deriving `Encode` and `Decode` for `RawEvent`.
#[macro_use]
extern crate parity_codec_derive;
extern crate parity_codec as codec;
// Needed for type-safe access to storage DB.
#[macro_use]
extern crate srml_support as runtime_support;
// Needed for various traits. In our case, `OnFinalise`.
extern crate sr_primitives as primitives;
// `system` module provides us with all sorts of useful stuff and macros
// depend on it being around.
extern crate srml_system as system;
use primitives::traits::OnFinalise;
use runtime_support::{StorageValue, StorageMap, dispatch::Result, Parameter};
use primitives::traits::{Member, SimpleArithmetic, Zero};
use system::ensure_signed;
pub trait Trait: system::Trait {
/// The overarching event type.
type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
/// The units in which we record balances.
type Balance: Member + Parameter + SimpleArithmetic + Default + Copy;
}
type AssetId = u32;
decl_module! {
// Simple declaration of the `Module` type. Lets the macro know what its working on.
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
/// Issue a new class of fungible assets. There are, and will only ever be, `total`
/// such assets and they'll all belong to the `origin` initially. It will have an
/// identifier `AssetId` instance: this will be specified in the `Issued` event.
fn issue(origin, total: T::Balance) -> Result;
/// Move some assets from one holder to another.
fn transfer(origin, id: AssetId, target: T::AccountId, total: T::Balance) -> Result;
/// Destroy any assets of `id` owned by `origin`.
fn destroy(origin, id: AssetId) -> Result;
}
}
/// An event in this module. Events are simple means of reporting specific conditions and
/// circumstances that have happened that users, Dapps and/or chain explorers would find
/// interesting and otherwise difficult to detect.
decl_event!(
pub enum Event<T> where <T as system::Trait>::AccountId, <T as Trait>::Balance {
/// Some assets were issued.
Issued(AssetId, AccountId, Balance),
/// Some assets were transfered.
Transfered(AssetId, AccountId, AccountId, Balance),
/// Some assets were destroyed.
Destroyed(AssetId, AccountId, Balance),
}
);
decl_storage! {
trait Store for Module<T: Trait> as Assets {
/// The number of units of assets held by any given account.
Balances: map (AssetId, T::AccountId) => T::Balance;
/// The next asset identifier up for grabs.
NextAssetId get(next_asset_id): AssetId;
}
}
// The main implementation block for the module.
impl<T: Trait> Module<T> {
/// Deposit one of this module's events.
// TODO: move into `decl_module` macro.
fn deposit_event(event: Event<T>) {
<system::Module<T>>::deposit_event(<T as Trait>::Event::from(event).into());
}
// Public immutables
/// Get the asset `id` balance of `who`.
pub fn balance(id: AssetId, who: T::AccountId) -> T::Balance {
<Balances<T>>::get((id, who))
}
// Implement Calls and add public immutables and private mutables.
fn issue(origin: T::Origin, total: T::Balance) -> Result {
let origin = ensure_signed(origin)?;
let id = Self::next_asset_id();
<NextAssetId<T>>::mutate(|id| *id += 1);
<Balances<T>>::insert((id, origin.clone()), total);
Self::deposit_event(RawEvent::Issued(id, origin, total));
Ok(())
}
fn transfer(origin: T::Origin, id: AssetId, target: T::AccountId, amount: T::Balance) -> Result {
let origin = ensure_signed(origin)?;
let origin_account = (id, origin.clone());
let origin_balance = <Balances<T>>::get(&origin_account);
ensure!(origin_balance >= amount, "origin account balance must be greater than amount");
Self::deposit_event(RawEvent::Transfered(id, origin, target.clone(), amount));
<Balances<T>>::insert(origin_account, origin_balance - amount);
<Balances<T>>::mutate((id, target), |balance| *balance += amount);
Ok(())
}
fn destroy(origin: T::Origin, id: AssetId) -> Result {
let origin = ensure_signed(origin)?;
let balance = <Balances<T>>::take((id, origin.clone()));
ensure!(!balance.is_zero(), "origin balance should be non-zero");
Self::deposit_event(RawEvent::Destroyed(id, origin, balance));
Ok(())
}
}
// This trait expresses what should happen when the block is finalised.
impl<T: Trait> OnFinalise<T::BlockNumber> for Module<T> {}
#[cfg(test)]
mod tests {
use super::*;
use runtime_io::with_externalities;
use substrate_primitives::{H256, Blake2Hasher};
// The testing primitives are very useful for avoiding having to work with signatures
// or public keys. `u64` is used as the `AccountId` and no `Signature`s are requried.
use primitives::{BuildStorage, traits::{BlakeTwo256}, testing::{Digest, DigestItem, Header}};
impl_outer_origin! {
pub enum Origin for Test {}
}
// For testing the module, we construct most of a mock runtime. This means
// first constructing a configuration type (`Test`) which `impl`s each of the
// configuration traits of modules we want to use.
#[derive(Clone, Eq, PartialEq)]
pub struct Test;
impl system::Trait for Test {
type Origin = Origin;
type Index = u64;
type BlockNumber = u64;
type Hash = H256;
type Hashing = BlakeTwo256;
type Digest = Digest;
type AccountId = u64;
type Header = Header;
type Event = ();
type Log = DigestItem;
}
impl Trait for Test {
type Event = ();
type Balance = u64;
}
type Assets = Module<Test>;
// This function basically just builds a genesis storage key/value store according to
// our desired mockup.
fn new_test_ext() -> runtime_io::TestExternalities<Blake2Hasher> {
system::GenesisConfig::<Test>::default().build_storage().unwrap().into()
}
#[test]
fn it_works() {
with_externalities(&mut new_test_ext(), || {
assert_ok!(Assets::issue(Origin::signed(1), 100));
assert_eq!(Assets::balance(0, 1), 100);
assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50));
assert_eq!(Assets::balance(0, 1), 50);
assert_eq!(Assets::balance(0, 2), 50);
assert_ok!(Assets::destroy(Origin::signed(2), 0));
assert_eq!(Assets::balance(0, 2), 0);
assert_noop!(Assets::transfer(Origin::signed(2), 0, 1, 50), "origin account balance must be greater than amount");
});
}
}
+34 -2
View File
@@ -41,11 +41,15 @@ extern crate substrate_primitives;
extern crate sr_io as runtime_io;
use rstd::prelude::*;
use rstd::result;
use runtime_support::{storage, Parameter};
use runtime_support::dispatch::Result;
use runtime_support::storage::StorageValue;
use runtime_support::storage::unhashed::StorageVec;
use primitives::traits::{MaybeSerializeDebug, OnFinalise, Member};
use primitives::RuntimeString;
use primitives::traits::{
MaybeSerializeDebug, OnFinalise, Member, ProvideInherent, Block as BlockT
};
use substrate_primitives::storage::well_known_keys;
use system::{ensure_signed, ensure_inherent};
@@ -235,6 +239,35 @@ impl<T: Trait> Module<T> {
}
}
impl<T: Trait> ProvideInherent for Module<T> {
type Inherent = Vec<u32>;
type Call = Call<T>;
type Error = RuntimeString;
fn create_inherent_extrinsics(data: Self::Inherent) -> Vec<(u32, Self::Call)> {
vec![(T::NOTE_OFFLINE_POSITION, Call::note_offline(data))]
}
fn check_inherent<Block: BlockT, F: Fn(&Block::Extrinsic) -> Option<&Self::Call>>(
block: &Block, data: Self::Inherent, extract_function: &F
) -> result::Result<(), Self::Error> {
let noted_offline = block
.extrinsics().get(T::NOTE_OFFLINE_POSITION as usize)
.and_then(|xt| match extract_function(&xt) {
Some(Call::note_offline(ref x)) => Some(&x[..]),
_ => None,
}).unwrap_or(&[]);
noted_offline.iter().try_for_each(|n|
if !data.contains(n) {
Err("Online node marked offline".into())
} else {
Ok(())
}
)
}
}
/// Finalization hook for the consensus module.
impl<T: Trait> OnFinalise<T::BlockNumber> for Module<T> {
fn on_finalise(_n: T::BlockNumber) {
@@ -246,4 +279,3 @@ impl<T: Trait> OnFinalise<T::BlockNumber> for Module<T> {
}
}
}
+5 -5
View File
@@ -20,15 +20,15 @@ srml-balances = { path = "../balances", default-features = false }
[features]
default = ["std"]
std = [
"sr-std/std",
"sr-io/std",
"srml-support/std",
"sr-primitives/std",
"srml-balances/std",
"serde/std",
"serde_derive",
"parity-codec/std",
"parity-codec-derive/std",
"sr-std/std",
"sr-io/std",
"sr-primitives/std",
"substrate-primitives/std",
"srml-support/std",
"srml-system/std",
"srml-balances/std",
]
+13 -16
View File
@@ -25,12 +25,15 @@ extern crate sr_std;
// Needed for tests (`with_externalities`).
#[cfg(test)]
extern crate sr_io as runtime_io;
extern crate sr_io;
// Needed for the set of mock primitives used in our tests.
#[cfg(test)]
extern crate substrate_primitives;
// Needed for various traits. In our case, `OnFinalise`.
extern crate sr_primitives;
// Needed for deriving `Serialize` and `Deserialize` for various types.
// We only implement the serde traits for std builds - they're unneeded
// in the wasm runtime.
@@ -45,10 +48,7 @@ extern crate parity_codec as codec;
// Needed for type-safe access to storage DB.
#[macro_use]
extern crate srml_support as runtime_support;
// Needed for various traits. In our case, `OnFinalise`.
extern crate sr_primitives as runtime_primitives;
extern crate srml_support as support;
// `system` module provides us with all sorts of useful stuff and macros
// depend on it being around.
extern crate srml_system as system;
@@ -57,8 +57,8 @@ extern crate srml_system as system;
// might find it useful).
extern crate srml_balances as balances;
use runtime_primitives::traits::OnFinalise;
use runtime_support::{StorageValue, dispatch::Result};
use sr_primitives::traits::OnFinalise;
use support::{StorageValue, dispatch::Result};
use system::ensure_signed;
/// Our module's configuration trait. All our types and consts go in here. If the
@@ -147,8 +147,8 @@ decl_storage! {
// e.g. pub Bar get(bar): map T::AccountId => Vec<(T::Balance, u64)>;
//
// For basic value items, you'll get a type which implements
// `runtime_support::StorageValue`. For map items, you'll get a type which
// implements `runtime_support::StorageMap`.
// `support::StorageValue`. For map items, you'll get a type which
// implements `support::StorageMap`.
//
// If they have a getter (`get(getter_name)`), then your module will come
// equipped with `fn getter_name() -> Type` for basic value items or
@@ -241,6 +241,7 @@ impl<T: Trait> Module<T> {
Ok(())
}
#[allow(dead_code)]
fn accumulate_foo(origin: T::Origin, increase_by: T::Balance) -> Result {
let _sender = ensure_signed(origin)?;
@@ -278,15 +279,11 @@ impl<T: Trait> OnFinalise<T::BlockNumber> for Module<T> {
mod tests {
use super::*;
use runtime_io::with_externalities;
use sr_io::with_externalities;
use substrate_primitives::{H256, Blake2Hasher};
use runtime_primitives::BuildStorage;
use runtime_primitives::traits::{BlakeTwo256};
use runtime_primitives::testing::DigestItem;
// The testing primitives are very useful for avoiding having to work with signatures
// or public keys. `u64` is used as the `AccountId` and no `Signature`s are requried.
use runtime_primitives::testing::{Digest, Header};
use sr_primitives::{BuildStorage, traits::{BlakeTwo256}, testing::{Digest, DigestItem, Header}};
impl_outer_origin! {
pub enum Origin for Test {}
@@ -323,7 +320,7 @@ mod tests {
// This function basically just builds a genesis storage key/value store according to
// our desired mockup.
fn new_test_ext() -> runtime_io::TestExternalities<Blake2Hasher> {
fn new_test_ext() -> sr_io::TestExternalities<Blake2Hasher> {
let mut t = system::GenesisConfig::<Test>::default().build_storage().unwrap();
// We use default for brevity, but you can configure as desired if needed.
t.extend(balances::GenesisConfig::<Test>::default().build_storage().unwrap());
+6 -6
View File
@@ -260,12 +260,12 @@ impl<
expected_index = expected_index + One::one();
}
TransactionValidity::Valid(
/*priority: */encoded_len as TransactionPriority,
/*requires: */deps,
/*provides: */vec![(sender, *index).encode()],
/*longevity: */TransactionLongevity::max_value(),
)
TransactionValidity::Valid {
priority: encoded_len as TransactionPriority,
requires: deps,
provides: vec![(sender, *index).encode()],
longevity: TransactionLongevity::max_value(),
}
} else {
return TransactionValidity::Invalid
}
+128
View File
@@ -0,0 +1,128 @@
// Copyright 2018 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
#[doc(hidden)]
pub use rstd::{result::Result, vec::Vec};
#[doc(hidden)]
pub use runtime_primitives::traits::ProvideInherent;

/// Implement the outer inherent.
/// All given modules need to implement `ProvideInherent`.
///
/// # Example
///
/// ```nocompile
/// impl_outer_inherent! {
///     pub struct InherentData where Block = Block, UncheckedExtrinsic = UncheckedExtrinsic {
///         timestamp: Timestamp export Error as TimestampInherentError,
///         consensus: Consensus,
///     }
/// }
/// ```
///
/// Additional parameters after `UncheckedExtrinsic` are `Error` and `Call`.
#[macro_export]
macro_rules! impl_outer_inherent {
	// Entry arm: neither `Error` nor `Call` supplied — default them to
	// `InherentError` and `Call`, then recurse into the full arm below.
	(
		$(#[$attr:meta])*
		pub struct $name:ident where Block = $block:ident, UncheckedExtrinsic = $unchecked:ident {
			$( $module:ident: $module_ty:ident $(export Error as $error_name:ident)*, )*
		}
	) => {
		impl_outer_inherent!(
			$( #[$attr] )*
			pub struct $name where Block = $block, UncheckedExtrinsic = $unchecked, Error = InherentError, Call = Call {
				$( $module: $module_ty $(export Error as $error_name)*, )*
			}
		);
	};
	// Entry arm: `Error` supplied but `Call` not — default `Call` and recurse.
	(
		$(#[$attr:meta])*
		pub struct $name:ident where Block = $block:ident, UncheckedExtrinsic = $unchecked:ident, Error = $error:ident {
			$( $module:ident: $module_ty:ident $(export Error as $error_name:ident)*, )*
		}
	) => {
		impl_outer_inherent!(
			$( #[$attr] )*
			pub struct $name where Block = $block, UncheckedExtrinsic = $unchecked, Error = $error, Call = Call {
				$( $module: $module_ty $(export Error as $error_name)*, )*
			}
		);
	};
	// Main arm: every parameter explicit. Generates the inherent-data struct,
	// the outer error enum, and the create/check plumbing over all modules.
	(
		$(#[$attr:meta])*
		pub struct $name:ident where Block = $block:ident, UncheckedExtrinsic = $unchecked:ident, Error = $error:ident, Call = $call:ident {
			$( $module:ident: $module_ty:ident $(export Error as $error_name:ident)*, )*
		}
	) => {
		$( #[$attr] )*
		// Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted.
		#[derive(Encode, Decode)]
		/// Inherent data to include in a block.
		pub struct $name {
			$( $module: <$module_ty as $crate::inherent::ProvideInherent>::Inherent, )*
		}

		// Optional public aliases for per-module inherent error types,
		// emitted only for entries written as `export Error as Name`.
		$(
			$(
				pub type $error_name =<$module_ty as $crate::inherent::ProvideInherent>::Error;
			)*
		)*

		impl $name {
			/// Create a new instance.
			pub fn new( $( $module: <$module_ty as $crate::inherent::ProvideInherent>::Inherent ),* ) -> Self {
				Self {
					$( $module, )*
				}
			}

			// Collect `(position, call)` pairs from every module, wrap each
			// call as an unsigned extrinsic of the outer `Call` enum, and
			// order them by the position each module requested.
			fn create_inherent_extrinsics(self) -> Vec<$unchecked> {
				let mut inherent = $crate::inherent::Vec::new();

				$(
					inherent.extend(
						<$module_ty as $crate::inherent::ProvideInherent>::create_inherent_extrinsics(self.$module)
							.into_iter()
							.map(|v| (v.0, $unchecked::new_unsigned($call::$module_ty(v.1))))
					);
				)*

				// Sort by the requested position key (v.0) so each module's
				// inherent ends up at the index it expects in the block.
				inherent.as_mut_slice().sort_unstable_by_key(|v| v.0);
				inherent.into_iter().map(|v| v.1).collect()
			}

			// Let each module validate its own inherent in `block`; the
			// module-local error is wrapped into the matching variant of the
			// outer error enum and the first failure short-circuits.
			fn check_inherents(self, block: $block) -> $crate::inherent::Result<(), $error> {
				$(
					<$module_ty as $crate::inherent::ProvideInherent>::check_inherent(
						&block, self.$module, &|xt| match xt.function {
							Call::$module_ty(ref data) => Some(data),
							_ => None,
						}).map_err($error::$module_ty)?;
				)*
				Ok(())
			}
		}

		// Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted.
		#[derive(Encode)]
		#[cfg_attr(feature = "std", derive(Decode))]
		/// Outer inherent error: one variant per module, carrying that
		/// module's `ProvideInherent::Error`.
		pub enum $error {
			$( $module_ty(<$module_ty as $crate::inherent::ProvideInherent>::Error), )*
		}
	};
}
+3 -10
View File
@@ -21,15 +21,12 @@
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(not(feature = "std"), feature(alloc))]
#[cfg(not(feature = "std"))]
extern crate alloc;
#[cfg(feature = "std")]
extern crate serde;
extern crate sr_std as rstd;
extern crate sr_io as runtime_io;
#[cfg(feature = "std")]
#[doc(hidden)]
pub extern crate sr_primitives as runtime_primitives;
extern crate substrate_metadata;
@@ -49,12 +46,6 @@ extern crate parity_codec_derive;
pub extern crate parity_codec as codec;
pub use self::storage::generator::Storage as GenericStorage;
#[cfg(feature = "std")]
pub mod alloc {
pub use std::boxed;
pub use std::vec;
}
#[macro_use]
pub mod dispatch;
#[macro_use]
@@ -68,6 +59,8 @@ mod origin;
pub mod metadata;
#[macro_use]
mod runtime;
#[macro_use]
pub mod inherent;
pub use self::storage::{StorageVec, StorageList, StorageValue, StorageMap};
pub use self::hashable::Hashable;
+158 -17
View File
@@ -42,17 +42,21 @@
/// - `Storage`
/// - `Event` or `Event<T>` (if the event is generic)
/// - `Origin` or `Origin<T>` (if the origin is generic)
/// - `Config`
/// - `Config` or `Config<T>` (if the config is generic)
/// - `Log( $(IDENT),* )`
#[macro_export]
macro_rules! construct_runtime {
(
pub enum $runtime:ident with Log ($log_internal:ident: DigestItem<$( $log_genarg:ty ),+>) {
pub enum $runtime:ident with Log ($log_internal:ident: DigestItem<$( $log_genarg:ty ),+>)
where Block = $block:ident, UncheckedExtrinsic = $unchecked:ident
{
$( $rest:tt )*
}
) => {
construct_runtime!(
$runtime;
$block;
$unchecked;
$log_internal < $( $log_genarg ),* >;
;
$( $rest )*
@@ -60,6 +64,8 @@ macro_rules! construct_runtime {
};
(
$runtime:ident;
$block:ident;
$unchecked:ident;
$log_internal:ident <$( $log_genarg:ty ),+>;
$(
$expanded_name:ident: $expanded_module:ident::{
@@ -85,6 +91,8 @@ macro_rules! construct_runtime {
) => {
construct_runtime!(
$runtime;
$block;
$unchecked;
$log_internal < $( $log_genarg ),* >;
$(
$expanded_name: $expanded_module::{
@@ -94,7 +102,7 @@ macro_rules! construct_runtime {
$( ( $( $expanded_modules_args ),* ) )*
),*
},
)* $name: $module::{Module, Call, Storage, Event<T>, Config};
)* $name: $module::{Module, Call, Storage, Event<T>, Config<T>};
$(
$rest_name: $rest_module $(
::{
@@ -110,6 +118,8 @@ macro_rules! construct_runtime {
};
(
$runtime:ident;
$block:ident;
$unchecked:ident;
$log_internal:ident <$( $log_genarg:ty ),+>;
$(
$expanded_name:ident: $expanded_module:ident::{
@@ -142,6 +152,8 @@ macro_rules! construct_runtime {
) => {
construct_runtime!(
$runtime;
$block;
$unchecked;
$log_internal < $( $log_genarg ),* >;
$(
$expanded_name: $expanded_module::{
@@ -153,7 +165,7 @@ macro_rules! construct_runtime {
},
)*
$name: $module::{
Module, Call, Storage, Event<T>, Config,
Module, Call, Storage, Event<T>, Config<T>,
$(
$modules $( <$modules_generic> )* $( ( $( $modules_args ),* ) )*
),*
@@ -173,6 +185,8 @@ macro_rules! construct_runtime {
};
(
$runtime:ident;
$block:ident;
$unchecked:ident;
$log_internal:ident <$( $log_genarg:ty ),+>;
$(
$expanded_name:ident: $expanded_module:ident::{
@@ -204,6 +218,8 @@ macro_rules! construct_runtime {
) => {
construct_runtime!(
$runtime;
$block;
$unchecked;
$log_internal < $( $log_genarg ),* >;
$(
$expanded_name: $expanded_module::{
@@ -234,6 +250,8 @@ macro_rules! construct_runtime {
};
(
$runtime:ident;
$block:ident;
$unchecked:ident;
$log_internal:ident <$( $log_genarg:ty ),+>;
$(
$name:ident: $module:ident::{
@@ -245,6 +263,13 @@ macro_rules! construct_runtime {
}
),*;
) => {
mashup! {
$(
substrate_generate_ident_name["config-ident" $name] = $name Config;
substrate_generate_ident_name["inherent-error-ident" $name] = $name InherentError;
)*
}
#[derive(Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))]
pub struct $runtime;
@@ -298,6 +323,15 @@ macro_rules! construct_runtime {
$name: $module::{ $( $modules $( <$modules_generic> )* ),* }
),*;
);
__decl_outer_inherent!(
$runtime;
$block;
$unchecked;
;
$(
$name: $module::{ $( $modules $( <$modules_generic> )* ),* }
),*;
);
}
}
@@ -918,7 +952,7 @@ macro_rules! __decl_outer_log {
macro_rules! __decl_outer_config {
(
$runtime:ident;
$( $parsed_modules:ident :: $parsed_name:ident ),*;
$( $parsed_modules:ident :: $parsed_name:ident $( < $parsed_generic:ident > )* ),*;
$name:ident: $module:ident::{
Config $(, $modules:ident $( <$modules_generic:ident> )* )*
}
@@ -928,6 +962,109 @@ macro_rules! __decl_outer_config {
) => {
__decl_outer_config!(
$runtime;
$( $parsed_modules :: $parsed_name $( < $parsed_generic > )*, )* $module::$name;
$(
$rest_name: $rest_module::{
$( $rest_modules $( <$rest_modules_generic> )* ),*
}
),*;
);
};
(
$runtime:ident;
$( $parsed_modules:ident :: $parsed_name:ident $( < $parsed_generic:ident > )* ),*;
$name:ident: $module:ident::{
Config<T> $(, $modules:ident $( <$modules_generic:ident> )* )*
}
$(, $rest_name:ident : $rest_module:ident::{
$( $rest_modules:ident $( <$rest_modules_generic:ident> )* ),*
})*;
) => {
__decl_outer_config!(
$runtime;
$( $parsed_modules :: $parsed_name $( < $parsed_generic > )*, )* $module::$name<T>;
$(
$rest_name: $rest_module::{
$( $rest_modules $( <$rest_modules_generic> )* ),*
}
),*;
);
};
(
$runtime:ident;
$( $parsed_modules:ident :: $parsed_name:ident $( < $parsed_generic:ident > )* ),*;
$name:ident: $module:ident::{
$ingore:ident $( <$ignor:ident> )* $(, $modules:ident $( <$modules_generic:ident> )* )*
}
$(, $rest_name:ident : $rest_module:ident::{
$( $rest_modules:ident $( <$rest_modules_generic:ident> )* ),*
})*;
) => {
__decl_outer_config!(
$runtime;
$( $parsed_modules :: $parsed_name $( < $parsed_generic > )*),*;
$name: $module::{ $( $modules $( <$modules_generic> )* ),* }
$(
, $rest_name: $rest_module::{
$( $rest_modules $( <$rest_modules_generic> )* ),*
}
)*;
);
};
(
$runtime:ident;
$( $parsed_modules:ident :: $parsed_name:ident $( < $parsed_generic:ident > )* ),*;
$name:ident: $module:ident::{}
$(, $rest_name:ident : $rest_module:ident::{
$( $rest_modules:ident $( <$rest_modules_generic:ident> )* ),*
})*;
) => {
__decl_outer_config!(
$runtime;
$( $parsed_modules :: $parsed_name $( < $parsed_generic > )*),*;
$(
$rest_name: $rest_module::{
$( $rest_modules $( <$rest_modules_generic> )* ),*
}
),*;
);
};
(
$runtime:ident;
$( $parsed_modules:ident :: $parsed_name:ident $( < $parsed_generic:ident > )* ),*;
;
) => {
substrate_generate_ident_name! {
impl_outer_config!(
pub struct GenesisConfig for $runtime {
$(
"config-ident" $parsed_name => $parsed_modules $( < $parsed_generic > )*,
)*
}
);
}
};
}
#[macro_export]
#[doc(hidden)]
macro_rules! __decl_outer_inherent {
(
$runtime:ident;
$block:ident;
$unchecked:ident;
$( $parsed_modules:ident :: $parsed_name:ident ),*;
$name:ident: $module:ident::{
Inherent $(, $modules:ident $( <$modules_generic:ident> )* )*
}
$(, $rest_name:ident : $rest_module:ident::{
$( $rest_modules:ident $( <$rest_modules_generic:ident> )* ),*
})*;
) => {
__decl_outer_inherent!(
$runtime;
$block;
$unchecked;
$( $parsed_modules :: $parsed_name, )* $module::$name;
$(
$rest_name: $rest_module::{
@@ -938,6 +1075,8 @@ macro_rules! __decl_outer_config {
};
(
$runtime:ident;
$block:ident;
$unchecked:ident;
$( $parsed_modules:ident :: $parsed_name:ident ),*;
$name:ident: $module:ident::{
$ingore:ident $( <$ignor:ident> )* $(, $modules:ident $( <$modules_generic:ident> )* )*
@@ -946,8 +1085,10 @@ macro_rules! __decl_outer_config {
$( $rest_modules:ident $( <$rest_modules_generic:ident> )* ),*
})*;
) => {
__decl_outer_config!(
__decl_outer_inherent!(
$runtime;
$block;
$unchecked;
$( $parsed_modules :: $parsed_name ),*;
$name: $module::{ $( $modules $( <$modules_generic> )* ),* }
$(
@@ -959,14 +1100,18 @@ macro_rules! __decl_outer_config {
};
(
$runtime:ident;
$block:ident;
$unchecked:ident;
$( $parsed_modules:ident :: $parsed_name:ident ),*;
$name:ident: $module:ident::{}
$(, $rest_name:ident : $rest_module:ident::{
$( $rest_modules:ident $( <$rest_modules_generic:ident> )* ),*
})*;
) => {
__decl_outer_config!(
__decl_outer_inherent!(
$runtime;
$block;
$unchecked;
$( $parsed_modules :: $parsed_name ),*;
$(
$rest_name: $rest_module::{
@@ -977,20 +1122,16 @@ macro_rules! __decl_outer_config {
};
(
$runtime:ident;
$block:ident;
$unchecked:ident;
$( $parsed_modules:ident :: $parsed_name:ident ),*;
;
) => {
mashup! {
$(
substrate_generate_config_name["config-name" $parsed_name] = $parsed_name Config;
)*
}
substrate_generate_config_name! {
impl_outer_config!(
pub struct GenesisConfig for $runtime {
substrate_generate_ident_name! {
impl_outer_inherent!(
pub struct InherentData where Block = $block, UncheckedExtrinsic = $unchecked {
$(
"config-name" $parsed_name => $parsed_modules,
$parsed_modules: $parsed_name export Error as "inherent-error-ident" $parsed_name,
)*
}
);
@@ -590,6 +590,21 @@ macro_rules! __generate_genesis_config {
);
};
// Do not generate any `GenesisConfig` if we don't require it.
(@GEN
[$traittype:ident $traitinstance:ident]
// normal getters
[]
// for normal builders
[$( $normalclassname:ident ($normalbuild:expr) ;)*]
// for map builders
[$( $mapclassname:ident ($mapbuild:expr) ;)*]
// extra genesis fields
[]
// final build storage call
[$call:expr]
) => {};
(@GEN
[$traittype:ident $traitinstance:ident]
// normal getters
+2
View File
@@ -7,6 +7,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
hex-literal = "0.1.0"
serde = { version = "1.0", default-features = false }
serde_derive = { version = "1.0", optional = true }
parity-codec-derive = { version = "2.1", default-features = false }
parity-codec = { version = "2.1", default-features = false }
substrate-primitives = { path = "../../core/primitives", default-features = false }
sr-std = { path = "../../core/sr-std", default-features = false }
@@ -29,6 +30,7 @@ std = [
"srml-consensus/std",
"serde/std",
"serde_derive",
"parity-codec-derive/std",
"parity-codec/std",
"substrate-primitives/std",
"srml-system/std",
+47 -5
View File
@@ -33,6 +33,7 @@
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg_attr(not(feature = "std"), macro_use)]
extern crate sr_std as rstd;
#[macro_use]
@@ -50,14 +51,18 @@ extern crate sr_primitives as runtime_primitives;
extern crate srml_system as system;
extern crate srml_consensus as consensus;
extern crate parity_codec as codec;
#[macro_use]
extern crate parity_codec_derive;
use codec::HasCompact;
use runtime_support::{StorageValue, Parameter};
use runtime_support::dispatch::Result;
use runtime_primitives::traits::{As, OnFinalise, SimpleArithmetic, Zero};
use runtime_primitives::RuntimeString;
use runtime_primitives::traits::{
As, OnFinalise, SimpleArithmetic, Zero, ProvideInherent, Block as BlockT, Extrinsic
};
use system::ensure_inherent;
use rstd::ops::{Mul, Div};
use rstd::{result, ops::{Mul, Div}, vec::Vec};
pub trait Trait: consensus::Trait + system::Trait {
/// The position of the required timestamp-set extrinsic.
@@ -106,7 +111,7 @@ impl<T: Trait> Module<T> {
fn set(origin: T::Origin, now: <T::Moment as HasCompact>::Type) -> Result {
ensure_inherent(origin)?;
let now = now.into();
assert!(!<Self as Store>::DidUpdate::exists(), "Timestamp must be updated only once in the block");
assert!(
<system::Module<T>>::extrinsic_index() == Some(T::TIMESTAMP_SET_POSITION),
@@ -123,12 +128,49 @@ impl<T: Trait> Module<T> {
}
/// Set the timestamp to something in particular. Only used for tests.
#[cfg(any(feature = "std", test))]
#[cfg(feature = "std")]
pub fn set_timestamp(now: T::Moment) {
<Self as Store>::Now::put(now);
}
}
#[derive(Encode)]
#[cfg_attr(feature = "std", derive(Decode))]
/// Error produced while checking a block's timestamp inherent.
pub enum InherentError {
	/// Catch-all: no valid timestamp-set extrinsic was found in the block.
	Other(RuntimeString),
	/// The block's timestamp (the contained value) is too far ahead of ours.
	TimestampInFuture(u64),
}

impl<T: Trait> ProvideInherent for Module<T> {
	type Inherent = T::Moment;
	type Call = Call<T>;
	type Error = InherentError;

	// The timestamp inherent is a single `set` call at the module's fixed
	// extrinsic position.
	fn create_inherent_extrinsics(data: Self::Inherent) -> Vec<(u32, Self::Call)> {
		vec![(T::TIMESTAMP_SET_POSITION, Call::set(data.into()))]
	}

	fn check_inherent<Block: BlockT, F: Fn(&Block::Extrinsic) -> Option<&Self::Call>>(
		block: &Block, data: Self::Inherent, extract_function: &F
	) -> result::Result<(), Self::Error> {
		// Maximum drift the block timestamp may be ahead of our own `data`
		// value (in `Moment` units; presumably seconds — TODO confirm).
		const MAX_TIMESTAMP_DRIFT: u64 = 60;

		// The timestamp extrinsic must be exactly at its fixed position...
		let xt = block.extrinsics().get(T::TIMESTAMP_SET_POSITION as usize)
			.ok_or_else(|| InherentError::Other("No valid timestamp inherent in block".into()))?;

		// ...and must be an unsigned `set` call.
		let t = match (xt.is_signed(), extract_function(&xt)) {
			(Some(false), Some(Call::set(ref t))) => t.clone(),
			_ => return Err(InherentError::Other("No valid timestamp inherent in block".into())),
		}.into().as_();

		// Reject block timestamps too far in the future relative to ours.
		if t > data.as_() + MAX_TIMESTAMP_DRIFT {
			Err(InherentError::TimestampInFuture(t))
		} else {
			Ok(())
		}
	}
}
impl<T: Trait> OnFinalise<T::BlockNumber> for Module<T> {
fn on_finalise(_n: T::BlockNumber) {
assert!(<Self as Store>::DidUpdate::take(), "Timestamp must be updated once in the block");
+1 -1
View File
@@ -85,7 +85,7 @@ decl_module! {
fn configure(proposal_bond: Permill, proposal_bond_minimum: <T::Balance as HasCompact>::Type, spend_period: <T::BlockNumber as HasCompact>::Type, burn: Permill) -> Result;
// Reject a proposed spend. The original deposit will be slashed.
fn reject_proposal(origin, roposal_id: Compact<ProposalIndex>) -> Result;
fn reject_proposal(origin, proposal_id: Compact<ProposalIndex>) -> Result;
// Approve a proposal. At a later time, the proposal will be allocated to the beneficiary
// and the original deposit will be returned.
+11 -12
View File
@@ -34,17 +34,12 @@ fn main() {
match matches.subcommand() {
("vanity", Some(matches)) => {
let desired: String = matches.value_of("pattern").map(str::to_string).unwrap_or_default();
let amount_of_keys = matches.value_of("number")
.expect("`number` has a default value; thus it can't be None; qed");
let amount_of_keys: usize = amount_of_keys.parse::<usize>().expect("Failed to parse number");
let keys = vanity::generate_key(&desired, amount_of_keys, true).expect("Key generation failed");
for key in keys {
println!("{} - {} ({}%)",
key.pair.public().to_ss58check(),
HexDisplay::from(&key.seed),
key.score);
}
let key = vanity::generate_key(&desired).expect("Key generation failed");
println!("Seed {} (hex: 0x{}) - {} ({}%)",
key.pair.public().to_ss58check(),
HexDisplay::from(&key.pair.public().0),
HexDisplay::from(&key.seed),
key.score);
}
("restore", Some(matches)) => {
let mut raw_seed = matches.value_of("seed")
@@ -63,7 +58,11 @@ fn main() {
seed[..len].copy_from_slice(&raw_seed[..len]);
let pair = Pair::from_seed(&seed);
println!("{}: {}", HexDisplay::from(&seed), pair.public().to_ss58check());
println!("Seed 0x{} is account:\n SS58: {}\n Hex: 0x{}",
HexDisplay::from(&seed),
pair.public().to_ss58check(),
HexDisplay::from(&pair.public().0)
);
},
_ => print_usage(&matches),
}
+16 -35
View File
@@ -16,7 +16,6 @@
use rand::{OsRng, Rng};
use substrate_primitives::ed25519::Pair;
use std::cmp;
fn good_waypoint(done: u64) -> u64 {
match done {
@@ -52,29 +51,25 @@ fn calculate_score(_desired: &str, key: &str) -> usize {
let snip_size = _desired.len() - truncate;
let truncated = &_desired[0..snip_size];
if let Some(pos) = key.find(truncated) {
let score = cmp::min(100, (51 - pos) + (snip_size * 50 / _desired.len()));
return score;
return (47 - pos) + (snip_size * 48);
}
}
0
}
pub fn generate_key(_desired: &str, _amount: usize, paranoiac: bool) -> Result<Vec<KeyPair>, &str> {
println!("Generating {} keys with pattern '{}'", _amount, &_desired);
pub fn generate_key(_desired: &str) -> Result<KeyPair, &str> {
println!("Generating key containing pattern '{}'", _desired);
let top = 30 + (_desired.len() * 32);
let top = 45 + (_desired.len() * 48);
let mut best = 0;
let mut seed = [0u8; 32];
let mut done = 0;
let mut res = vec![];
OsRng::new().unwrap().fill_bytes(&mut seed[..]);
loop {
if res.len() >= _amount { break; }
// reset to a new random seed at beginning and regularly after for paranoia.
if paranoiac || done % 100000 == 0 {
// reset to a new random seed at beginning and regularly thereafter
if done % 100000 == 0 {
OsRng::new().unwrap().fill_bytes(&mut seed[..]);
}
@@ -88,22 +83,18 @@ pub fn generate_key(_desired: &str, _amount: usize, paranoiac: bool) -> Result<V
seed: seed.clone(),
score: score,
};
res.push(keypair);
if best == top {
if best >= top {
println!("best: {} == top: {}", best, top);
break;
return Ok(keypair);
}
}
seed = next_seed(seed);
done += 1;
if done % good_waypoint(done) == 0 {
println!("Stopping after {} keys searched", done);
break;
println!("{} keys searched; best is {}/{} complete", done, best, top);
}
}
res.sort_unstable_by(|a, b| b.score.cmp(&a.score));
Ok(res)
}
#[cfg(test)]
@@ -112,49 +103,39 @@ mod tests {
#[cfg(feature = "bench")]
use test::Bencher;
#[test]
fn test_generation_no_args() {
assert!(generate_key("",1, false).unwrap().len() == 1);
}
#[test]
fn test_generation_with_single_char() {
assert!(generate_key("j", 1, false).unwrap().len() == 1);
}
#[test]
fn test_generation_with_args() {
assert!(generate_key("polka", 2, false).unwrap().len() == 2);
assert!(generate_key("j").unwrap().pair.public().to_ss58check().contains("j"));
}
#[test]
fn test_score_1_char_100() {
let score = calculate_score("j", "5jolkadotwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim");
assert!(score == 100, format!("Wrong score, we found {}", score));
assert_eq!(score, 94);
}
#[test]
fn test_score_100() {
let score = calculate_score("Polkadot", "5PolkadotwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim");
assert!( score == 100, format!("Wrong score, we found {}", score));
assert_eq!(score, 430);
}
#[test]
fn test_score_50_2() {
// 50% for the position + 50% for the size
assert!(calculate_score("Polkadot", "5PolkXXXXwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim") == 75);
assert_eq!(calculate_score("Polkadot", "5PolkXXXXwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim"), 238);
}
#[test]
fn test_score_0() {
assert!(calculate_score("Polkadot", "5GUWv4bLCchGUHJrzULXnh4JgXsMpTKRnjuXTY7Qo1Kh9uYK") == 0);
assert_eq!(calculate_score("Polkadot", "5GUWv4bLCchGUHJrzULXnh4JgXsMpTKRnjuXTY7Qo1Kh9uYK"), 0);
}
#[cfg(feature = "bench")]
#[bench]
fn bench_paranoiac(b: &mut Bencher) {
b.iter(|| {
generate_key("polka", 3, true)
generate_key("polk")
});
}
@@ -162,7 +143,7 @@ mod tests {
#[bench]
fn bench_not_paranoiac(b: &mut Bencher) {
b.iter(|| {
generate_key("polka", 3, false)
generate_key("polk")
});
}
}