best_containing operations (issue 603) (#740)

* add stub for Client.best_chain_containing_block_hash

* add fn blockchain::Backend::leaf_hashes

* fix typo

* sketch out Client.best_chain_containing_block_hash

* fix indent

* Blockchain.leaf_hashes -> Blockchain.leaves

* add unimplemented! stub impls for Blockchain.leaves

* start impl of Blockchain.leaves for in-memory client db

* Client.best_chain_containing...: check canonical first and make it compile

* first rough attempt at maintaining leaf list in in-memory db

* fix tab indent

* add test best_chain_containing_single_block

* add failing test best_chain_containing_with_fork

* pub use client::blockchain; in test-client to prevent circular dep in client tests

* best_chain_containing_with_single_block: improve and test leaves

* far improve in-memory Backend::leaves impl

* test blockchain::Backend::leaves more thoroughly

* handle more edge cases in blockchain::Backend::leaves impl for in memory

* fix test best_chain_containing_with_fork (two distinct test blocks had same hash)

* make best_chain_containing_block_hash pass existing tests

* improve docstring for Blockchain::leaves

* Client.best_chain_containing: some cleanup. support max_block_number

* best_chain_containing: remove broken outcommented fast check for best = canonical

* remove blank line

* best_block_containing: return None if target_hash not found

* best_chain_containing: add unreachable! at end of function

* improve tests for best_chain_containing

* renames

* more elaborate test scenario for best_containing

* best_containing: fix restriction of search through maybe_max_number

* best_containing: tests for restriction of search

* get rid of unnecessary clones

* replace Client::new_in_mem by new_with_backend which is useful for testing backends

* add test_client::new_with_backend for testing different backend impls

* add test for in_mem::Backend::leaves

* remove unused imports

* in_mem test_leaves: simplify

* flesh out tests for in_mem leaves impl

* remove tests for leaves from client which are now covered in implementing module

* improve comment

* add Client.new_in_mem again

* unwrap in test_client::new_with_backend

* make test_client::BlockBuilderExt work not just with in-mem backend

* make test client ext not just work with in mem backend

* add failing Backend.leaves test for client-db

* update Cargo.lock

* replace KeccakHasher with Blake2Hasher

* refactor

address grumble https://github.com/paritytech/substrate/pull/740#discussion_r217822862

* refactor using NumberFor

address grumble https://github.com/paritytech/substrate/pull/740#discussion_r217823341

* add test that exposes possible problem

* update docstring for Client.best_containing

* extract test for Backend.leaves for reuse

* improve test blockchain_header_and_hash_return_blocks_from_canonical_chain_given_block_numbers

* extract test_blockchain_query_by_number_gets_canonical to easily test multiple impls

* remove whitespace

* remove todo

* Client.best_containing: pre-empt search loop when target in canonical

* best_containing: prevent race condition by holding import lock

* add todo

* extract leaf list update code into function

* add comment

* client-db: use in-memory-kvdb for tests

* use BTreeSet to store leaves for in-mem which is faster and simpler

* add docstring

* add comments and fix formatting

* add initial raw version of LeafSet

* remove Client::update_leaves which has been superseded by LeafSet

* use LeafSet in in-mem backend

* keccak -> blake2

* don't reexport codec traits in primitives

addresses https://github.com/paritytech/substrate/pull/740#discussion_r219538185

* fix rebase mistake

* improve LeafSet and use it in state-db

* correct Transfer nonces to fix ApplyExtrinsicFailed(Stale)

* use given backend in canonical test

* kill dead tree-route code in util

* fix warnings

* tests for leafset

* reorganizations in in_mem backend

* fix reorganization canon block logic

* DB commit and safe reversion on write error

* fix style nits
This commit is contained in:
snd
2018-09-26 20:34:05 +09:00
committed by Robert Habermeier
parent 1438e15925
commit 58cc0992df
16 changed files with 1041 additions and 57 deletions
+4
View File
@@ -2913,6 +2913,8 @@ dependencies = [
"hash-db 0.9.0 (git+https://github.com/paritytech/trie)",
"heapsize 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
"hex-literal 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"kvdb 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"kvdb-memorydb 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
"memory-db 0.9.0 (git+https://github.com/paritytech/trie)",
"parity-codec 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2949,9 +2951,11 @@ dependencies = [
"sr-primitives 0.1.0",
"substrate-client 0.1.0",
"substrate-executor 0.1.0",
"substrate-keyring 0.1.0",
"substrate-primitives 0.1.0",
"substrate-state-db 0.1.0",
"substrate-state-machine 0.1.0",
"substrate-test-client 0.1.0",
"substrate-trie 0.4.0",
]
+2
View File
@@ -26,7 +26,9 @@ substrate-telemetry = { path = "../telemetry" }
hash-db = { git = "https://github.com/paritytech/trie" }
trie-db = { git = "https://github.com/paritytech/trie" }
rlp = "0.2.4"
kvdb = "0.1"
memory-db = { git = "https://github.com/paritytech/trie" }
[dev-dependencies]
substrate-test-client = { path = "../test-client" }
kvdb-memorydb = "0.1"
+2
View File
@@ -22,3 +22,5 @@ substrate-trie = { path = "../../trie" }
[dev-dependencies]
kvdb-memorydb = "0.1"
substrate-keyring = { path = "../../keyring" }
substrate-test-client = { path = "../../test-client" }
+53 -11
View File
@@ -47,6 +47,9 @@ extern crate log;
#[macro_use]
extern crate parity_codec_derive;
#[cfg(test)]
extern crate substrate_test_client as test_client;
#[cfg(test)]
extern crate kvdb_memorydb;
@@ -74,6 +77,7 @@ use state_machine::backend::Backend as StateBackend;
use executor::RuntimeInfo;
use state_machine::{CodeExecutor, DBValue, ExecutionStrategy};
use utils::{Meta, db_err, meta_keys, open_database, read_db, read_id, read_meta};
use client::LeafSet;
use state_db::StateDb;
pub use state_db::PruningMode;
@@ -141,15 +145,18 @@ impl<'a> state_db::MetaDb for StateMetaDb<'a> {
/// Block database
pub struct BlockchainDb<Block: BlockT> {
db: Arc<KeyValueDB>,
meta: RwLock<Meta<<Block::Header as HeaderT>::Number, Block::Hash>>,
meta: RwLock<Meta<NumberFor<Block>, Block::Hash>>,
leaves: RwLock<LeafSet<Block::Hash, NumberFor<Block>>>,
}
impl<Block: BlockT> BlockchainDb<Block> {
fn new(db: Arc<KeyValueDB>) -> Result<Self, client::error::Error> {
let meta = read_meta::<Block>(&*db, columns::HEADER)?;
let leaves = LeafSet::read_from_db(&*db, columns::HEADER, meta_keys::LEAF_PREFIX)?;
Ok(BlockchainDb {
db,
meta: RwLock::new(meta)
leaves: RwLock::new(leaves),
meta: RwLock::new(meta),
})
}
@@ -249,6 +256,10 @@ impl<Block: BlockT> client::blockchain::Backend<Block> for BlockchainDb<Block> {
fn cache(&self) -> Option<&client::blockchain::Cache<Block>> {
None
}
fn leaves(&self) -> Result<Vec<Block::Hash>, client::error::Error> {
Ok(self.leaves.read().hashes())
}
}
/// Database transaction
@@ -534,6 +545,7 @@ impl<Block> client::backend::Backend<Block, Blake2Hasher> for Backend<Block> whe
let mut transaction = DBTransaction::new();
if let Some(pending_block) = operation.pending_block {
let hash = pending_block.header.hash();
let parent_hash = *pending_block.header.parent_hash();
let number = pending_block.header.number().clone();
transaction.put(columns::HEADER, hash.as_ref(), &pending_block.header.encode());
@@ -549,7 +561,6 @@ impl<Block> client::backend::Backend<Block, Blake2Hasher> for Backend<Block> whe
// cannot find tree route with empty DB.
if meta.best_hash != Default::default() {
let parent_hash = *pending_block.header.parent_hash();
let tree_route = ::client::blockchain::tree_route(
&self.blockchain,
BlockId::Hash(meta.best_hash),
@@ -624,10 +635,25 @@ impl<Block> client::backend::Backend<Block, Blake2Hasher> for Backend<Block> whe
debug!(target: "db", "DB Commit {:?} ({}), best = {}", hash, number,
pending_block.leaf_state.is_best());
self.storage.db.write(transaction).map_err(db_err)?;
{
let mut leaves = self.blockchain.leaves.write();
let displaced_leaf = leaves.import(hash, number, parent_hash);
leaves.prepare_transaction(&mut transaction, columns::HEADER, meta_keys::LEAF_PREFIX);
let write_result = self.storage.db.write(transaction).map_err(db_err);
if let Err(e) = write_result {
// revert leaves set update, if there was one.
if let Some(displaced_leaf) = displaced_leaf {
leaves.undo(displaced_leaf);
}
return Err(e);
}
drop(leaves);
}
self.blockchain.update_meta(
hash,
number,
hash.clone(),
number.clone(),
pending_block.leaf_state.is_best(),
finalized,
);
@@ -668,14 +694,15 @@ impl<Block> client::backend::Backend<Block, Blake2Hasher> for Backend<Block> whe
apply_state_commit(&mut transaction, commit);
let removed = best.clone();
best -= As::sa(1);
let hash = self.blockchain.hash(best)?.ok_or_else(
let header = self.blockchain.header(BlockId::Number(best))?.ok_or_else(
|| client::error::ErrorKind::UnknownBlock(
format!("Error reverting to {}. Block hash not found.", best)))?;
format!("Error reverting to {}. Block header not found.", best)))?;
transaction.put(columns::META, meta_keys::BEST_BLOCK, hash.as_ref());
transaction.put(columns::META, meta_keys::BEST_BLOCK, header.hash().as_ref());
transaction.delete(columns::HASH_LOOKUP, &::utils::number_to_lookup_key(removed));
self.storage.db.write(transaction).map_err(db_err)?;
self.blockchain.update_meta(hash, best, true, false);
self.blockchain.update_meta(header.hash().clone(), best.clone(), true, false);
self.blockchain.leaves.write().revert(header.hash().clone(), header.number().clone(), header.parent_hash().clone());
}
None => return Ok(As::sa(c))
}
@@ -723,6 +750,7 @@ mod tests {
use client::blockchain::HeaderBackend as BlockchainHeaderBackend;
use runtime_primitives::testing::{Header, Block as RawBlock};
use state_machine::{TrieMut, TrieDBMut, ChangesTrieStorage};
use test_client;
type Block = RawBlock<u64>;
@@ -846,7 +874,7 @@ mod tests {
op.reset_storage(storage.iter().cloned()).unwrap();
op.set_block_data(
header,
header.clone(),
Some(vec![]),
None,
NewBlockState::Best,
@@ -1101,4 +1129,18 @@ mod tests {
assert!(tree_route.enacted().is_empty());
}
}
#[test]
fn test_leaves_with_complex_block_tree() {
let backend: Arc<Backend<test_client::runtime::Block>> = Arc::new(Backend::new_test(20));
test_client::trait_tests::test_leaves_for_backend(backend);
}
#[test]
fn test_blockchain_query_by_number_gets_canonical() {
let backend: Arc<Backend<test_client::runtime::Block>> = Arc::new(Backend::new_test(20));
test_client::trait_tests::test_blockchain_query_by_number_gets_canonical(backend);
}
}
+19 -4
View File
@@ -24,7 +24,7 @@ use kvdb::{KeyValueDB, DBTransaction};
use client::backend::NewBlockState;
use client::blockchain::{BlockStatus, Cache as BlockchainCache,
HeaderBackend as BlockchainHeaderBackend, Info as BlockchainInfo};
use client::cht;
use client::{cht, LeafSet};
use client::error::{ErrorKind as ClientErrorKind, Result as ClientResult};
use client::light::blockchain::Storage as LightBlockchainStorage;
use codec::{Decode, Encode};
@@ -53,6 +53,7 @@ pub(crate) const AUTHORITIES_ENTRIES_TO_KEEP: u64 = cht::SIZE;
pub struct LightStorage<Block: BlockT> {
db: Arc<KeyValueDB>,
meta: RwLock<Meta<<<Block as BlockT>::Header as HeaderT>::Number, Block::Hash>>,
leaves: RwLock<LeafSet<Block::Hash, NumberFor<Block>>>,
_cache: DbCache<Block>,
}
@@ -92,10 +93,12 @@ impl<Block> LightStorage<Block>
columns::AUTHORITIES
)?;
let meta = RwLock::new(read_meta::<Block>(&*db, columns::HEADER)?);
let leaves = RwLock::new(LeafSet::read_from_db(&*db, columns::HEADER, meta_keys::LEAF_PREFIX)?);
Ok(LightStorage {
db,
meta,
leaves,
_cache: cache,
})
}
@@ -239,6 +242,7 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
let hash = header.hash();
let number = *header.number();
let parent_hash = *header.parent_hash();
transaction.put(columns::HEADER, hash.as_ref(), &header.encode());
transaction.put(columns::HASH_LOOKUP, &number_to_lookup_key(number), hash.as_ref());
@@ -250,7 +254,6 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
{
let meta = self.meta.read();
if meta.best_hash != Default::default() {
let parent_hash = *header.parent_hash();
let tree_route = ::client::blockchain::tree_route(
self,
BlockId::Hash(meta.best_hash),
@@ -294,8 +297,20 @@ impl<Block> LightBlockchainStorage<Block> for LightStorage<Block>
self.note_finalized(&mut transaction, &header, hash)?;
}
debug!("Light DB Commit {:?} ({})", hash, number);
self.db.write(transaction).map_err(db_err)?;
{
let mut leaves = self.leaves.write();
let displaced_leaf = leaves.import(hash, number, parent_hash);
debug!("Light DB Commit {:?} ({})", hash, number);
let write_result = self.db.write(transaction).map_err(db_err);
if let Err(e) = write_result {
// revert leaves set update if there was one.
if let Some(displaced_leaf) = displaced_leaf {
leaves.undo(displaced_leaf);
}
return Err(e);
}
}
self.update_meta(hash, number, leaf_state.is_best(), finalized);
Ok(())
+2
View File
@@ -48,6 +48,8 @@ pub mod meta_keys {
pub const BEST_AUTHORITIES: &[u8; 4] = b"auth";
/// Genesis block hash.
pub const GENESIS_HASH: &[u8; 3] = b"gen";
/// Leaves prefix list key.
pub const LEAF_PREFIX: &[u8; 4] = b"leaf";
}
/// Database metadata.
+5
View File
@@ -53,6 +53,11 @@ pub trait Backend<Block: BlockT>: HeaderBackend<Block> {
/// Returns data cache reference, if it is enabled on this backend.
fn cache(&self) -> Option<&Cache<Block>>;
/// Returns hashes of all blocks that are leaves of the block tree.
/// in other words, that have no children, are chain heads.
/// Results must be ordered best (longest, highest) chain first.
fn leaves(&self) -> Result<Vec<Block::Hash>>;
}
/// Blockchain optional data cache.
+379 -4
View File
@@ -57,7 +57,7 @@ pub struct Client<B, E, Block> where Block: BlockT {
execution_strategy: ExecutionStrategy,
}
/// A source of blockchain evenets.
/// A source of blockchain events.
pub trait BlockchainEvents<Block: BlockT> {
/// Get block import event stream. Not guaranteed to be fired for every
/// imported block.
@@ -190,9 +190,25 @@ pub fn new_in_mem<E, Block, S>(
Block: BlockT,
H256: From<Block::Hash>,
{
let backend = Arc::new(in_mem::Backend::new());
let executor = LocalCallExecutor::new(backend.clone(), executor);
Client::new(backend, executor, genesis_storage, ExecutionStrategy::NativeWhenPossible)
new_with_backend(Arc::new(in_mem::Backend::new()), executor, genesis_storage)
}
/// Create a client with the explicitly provided backend.
/// This is useful for testing backend implementations.
pub fn new_with_backend<B, E, Block, S>(
backend: Arc<B>,
executor: E,
build_genesis_storage: S,
) -> error::Result<Client<B, LocalCallExecutor<B, E>, Block>>
where
E: CodeExecutor<Blake2Hasher> + RuntimeInfo,
S: BuildStorage,
Block: BlockT,
H256: From<Block::Hash>,
B: backend::LocalBackend<Block, Blake2Hasher>
{
let call_executor = LocalCallExecutor::new(backend.clone(), executor);
Client::new(backend, call_executor, build_genesis_storage, ExecutionStrategy::NativeWhenPossible)
}
impl<B, E, Block> Client<B, E, Block> where
@@ -723,6 +739,105 @@ impl<B, E, Block> Client<B, E, Block> where
let info = self.backend.blockchain().info().map_err(|e| error::Error::from_blockchain(Box::new(e)))?;
Ok(self.header(&BlockId::Hash(info.best_hash))?.expect("Best block header must always exist"))
}
/// Get the most recent block hash of the best (longest) chains
/// that contain block with the given `target_hash`.
/// If `maybe_max_block_number` is `Some(max_block_number)`
/// the search is limited to block `numbers <= max_block_number`.
/// in other words as if there were no blocks greater than `max_block_number`.
/// TODO [snd] possibly implement this on blockchain::Backend and just redirect here
/// Returns `Ok(None)` if `target_hash` is not found in search space.
/// TODO [snd] write down time complexity
pub fn best_containing(&self, target_hash: Block::Hash, maybe_max_number: Option<NumberFor<Block>>) -> error::Result<Option<Block::Hash>> {
let target_header = {
match self.backend.blockchain().header(BlockId::Hash(target_hash))? {
Some(x) => x,
// target not in blockchain
None => { return Ok(None); },
}
};
if let Some(max_number) = maybe_max_number {
// target outside search range
if target_header.number() > &max_number {
return Ok(None);
}
}
let (leaves, best_already_checked) = {
// ensure no blocks are imported during this code block.
// an import could trigger a reorg which could change the canonical chain.
// we depend on the canonical chain staying the same during this code block.
let _import_lock = self.import_lock.lock();
let info = self.backend.blockchain().info()?;
let canon_hash = self.backend.blockchain().hash(*target_header.number())?
.ok_or_else(|| error::Error::from(format!("failed to get hash for block number {}", target_header.number())))?;
if canon_hash == target_hash {
if let Some(max_number) = maybe_max_number {
// something has to guarantee that max_number is in chain
return Ok(Some(self.backend.blockchain().hash(max_number)?.ok_or_else(|| error::Error::from(format!("failed to get hash for block number {}", max_number)))?));
} else {
return Ok(Some(info.best_hash));
}
}
(self.backend.blockchain().leaves()?, info.best_hash)
};
// for each chain. longest chain first. shortest last
for leaf_hash in leaves {
// ignore canonical chain which we already checked above
if leaf_hash == best_already_checked {
continue;
}
// start at the leaf
let mut current_hash = leaf_hash;
// if search is not restricted then the leaf is the best
let mut best_hash = leaf_hash;
// go backwards entering the search space
// waiting until we are <= max_number
if let Some(max_number) = maybe_max_number {
loop {
// TODO [snd] this should be a panic
let current_header = self.backend.blockchain().header(BlockId::Hash(current_hash.clone()))?
.ok_or_else(|| error::Error::from(format!("failed to get header for hash {}", current_hash)))?;
if current_header.number() <= &max_number {
best_hash = current_header.hash();
break;
}
current_hash = *current_header.parent_hash();
}
}
// go backwards through the chain (via parent links)
loop {
// until we find target
if current_hash == target_hash {
return Ok(Some(best_hash));
}
// TODO [snd] this should be a panic
let current_header = self.backend.blockchain().header(BlockId::Hash(current_hash.clone()))?
.ok_or_else(|| error::Error::from(format!("failed to get header for hash {}", current_hash)))?;
// stop search in this chain once we go below the target's block number
if current_header.number() < target_header.number() {
break;
}
current_hash = *current_header.parent_hash();
}
}
unreachable!("this is a bug. `target_hash` is in blockchain but wasn't found following all leaves backwards");
}
}
impl<B, E, Block> CurrentHeight for Client<B, E, Block> where
@@ -946,4 +1061,264 @@ mod tests {
assert!(client.state_at(&BlockId::Number(1)).unwrap() != client.state_at(&BlockId::Number(0)).unwrap());
assert_eq!(client.body(&BlockId::Number(1)).unwrap().unwrap().len(), 1)
}
#[test]
fn best_containing_with_genesis_block() {
// block tree:
// G
let client = test_client::new();
let genesis_hash = client.info().unwrap().chain.genesis_hash;
assert_eq!(genesis_hash.clone(), client.best_containing(genesis_hash.clone(), None).unwrap().unwrap());
}
#[test]
fn best_containing_with_hash_not_found() {
// block tree:
// G
let client = test_client::new();
let uninserted_block = client.new_block().unwrap().bake().unwrap();
assert_eq!(None, client.best_containing(uninserted_block.hash().clone(), None).unwrap());
}
#[test]
fn best_containing_with_single_chain_3_blocks() {
// block tree:
// G -> A1 -> A2
let client = test_client::new();
// G -> A1
let a1 = client.new_block().unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a1.clone()).unwrap();
// A1 -> A2
let a2 = client.new_block().unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a2.clone()).unwrap();
let genesis_hash = client.info().unwrap().chain.genesis_hash;
assert_eq!(a2.hash(), client.best_containing(genesis_hash, None).unwrap().unwrap());
assert_eq!(a2.hash(), client.best_containing(a1.hash(), None).unwrap().unwrap());
assert_eq!(a2.hash(), client.best_containing(a2.hash(), None).unwrap().unwrap());
}
#[test]
fn best_containing_with_multiple_forks() {
// NOTE: we use the version of the trait from `test_client`
// because that is actually different than the version linked to
// in the test facade crate.
use test_client::blockchain::Backend as BlockchainBackendT;
// block tree:
// G -> A1 -> A2 -> A3 -> A4 -> A5
// A1 -> B2 -> B3 -> B4
// B2 -> C3
// A1 -> D2
let client = test_client::new();
// G -> A1
let a1 = client.new_block().unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a1.clone()).unwrap();
// A1 -> A2
let a2 = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a2.clone()).unwrap();
// A2 -> A3
let a3 = client.new_block_at(&BlockId::Hash(a2.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a3.clone()).unwrap();
// A3 -> A4
let a4 = client.new_block_at(&BlockId::Hash(a3.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a4.clone()).unwrap();
// A4 -> A5
let a5 = client.new_block_at(&BlockId::Hash(a4.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a5.clone()).unwrap();
// A1 -> B2
let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap();
// this push is required as otherwise B2 has the same hash as A2 and won't get imported
builder.push_transfer(Transfer {
from: Keyring::Alice.to_raw_public().into(),
to: Keyring::Ferdie.to_raw_public().into(),
amount: 41,
nonce: 0,
}).unwrap();
let b2 = builder.bake().unwrap();
client.justify_and_import(BlockOrigin::Own, b2.clone()).unwrap();
// B2 -> B3
let b3 = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, b3.clone()).unwrap();
// B3 -> B4
let b4 = client.new_block_at(&BlockId::Hash(b3.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, b4.clone()).unwrap();
// // B2 -> C3
let mut builder = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap();
// this push is required as otherwise C3 has the same hash as B3 and won't get imported
builder.push_transfer(Transfer {
from: Keyring::Alice.to_raw_public().into(),
to: Keyring::Ferdie.to_raw_public().into(),
amount: 1,
nonce: 1,
}).unwrap();
let c3 = builder.bake().unwrap();
client.justify_and_import(BlockOrigin::Own, c3.clone()).unwrap();
// A1 -> D2
let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap();
// this push is required as otherwise D2 has the same hash as B2 and won't get imported
builder.push_transfer(Transfer {
from: Keyring::Alice.to_raw_public().into(),
to: Keyring::Ferdie.to_raw_public().into(),
amount: 1,
nonce: 0,
}).unwrap();
let d2 = builder.bake().unwrap();
client.justify_and_import(BlockOrigin::Own, d2.clone()).unwrap();
assert_eq!(client.info().unwrap().chain.best_hash, a5.hash());
let genesis_hash = client.info().unwrap().chain.genesis_hash;
let leaves = BlockchainBackendT::leaves(client.backend().blockchain()).unwrap();
assert!(leaves.contains(&a5.hash()));
assert!(leaves.contains(&b4.hash()));
assert!(leaves.contains(&c3.hash()));
assert!(leaves.contains(&d2.hash()));
assert_eq!(leaves.len(), 4);
// search without restriction
assert_eq!(a5.hash(), client.best_containing(genesis_hash, None).unwrap().unwrap());
assert_eq!(a5.hash(), client.best_containing(a1.hash(), None).unwrap().unwrap());
assert_eq!(a5.hash(), client.best_containing(a2.hash(), None).unwrap().unwrap());
assert_eq!(a5.hash(), client.best_containing(a3.hash(), None).unwrap().unwrap());
assert_eq!(a5.hash(), client.best_containing(a4.hash(), None).unwrap().unwrap());
assert_eq!(a5.hash(), client.best_containing(a5.hash(), None).unwrap().unwrap());
assert_eq!(b4.hash(), client.best_containing(b2.hash(), None).unwrap().unwrap());
assert_eq!(b4.hash(), client.best_containing(b3.hash(), None).unwrap().unwrap());
assert_eq!(b4.hash(), client.best_containing(b4.hash(), None).unwrap().unwrap());
assert_eq!(c3.hash(), client.best_containing(c3.hash(), None).unwrap().unwrap());
assert_eq!(d2.hash(), client.best_containing(d2.hash(), None).unwrap().unwrap());
// search only blocks with number <= 5. equivalent to without restriction for this scenario
assert_eq!(a5.hash(), client.best_containing(genesis_hash, Some(5)).unwrap().unwrap());
assert_eq!(a5.hash(), client.best_containing(a1.hash(), Some(5)).unwrap().unwrap());
assert_eq!(a5.hash(), client.best_containing(a2.hash(), Some(5)).unwrap().unwrap());
assert_eq!(a5.hash(), client.best_containing(a3.hash(), Some(5)).unwrap().unwrap());
assert_eq!(a5.hash(), client.best_containing(a4.hash(), Some(5)).unwrap().unwrap());
assert_eq!(a5.hash(), client.best_containing(a5.hash(), Some(5)).unwrap().unwrap());
assert_eq!(b4.hash(), client.best_containing(b2.hash(), Some(5)).unwrap().unwrap());
assert_eq!(b4.hash(), client.best_containing(b3.hash(), Some(5)).unwrap().unwrap());
assert_eq!(b4.hash(), client.best_containing(b4.hash(), Some(5)).unwrap().unwrap());
assert_eq!(c3.hash(), client.best_containing(c3.hash(), Some(5)).unwrap().unwrap());
assert_eq!(d2.hash(), client.best_containing(d2.hash(), Some(5)).unwrap().unwrap());
// search only blocks with number <= 4
assert_eq!(a4.hash(), client.best_containing(genesis_hash, Some(4)).unwrap().unwrap());
assert_eq!(a4.hash(), client.best_containing(a1.hash(), Some(4)).unwrap().unwrap());
assert_eq!(a4.hash(), client.best_containing(a2.hash(), Some(4)).unwrap().unwrap());
assert_eq!(a4.hash(), client.best_containing(a3.hash(), Some(4)).unwrap().unwrap());
assert_eq!(a4.hash(), client.best_containing(a4.hash(), Some(4)).unwrap().unwrap());
assert_eq!(None, client.best_containing(a5.hash(), Some(4)).unwrap());
assert_eq!(b4.hash(), client.best_containing(b2.hash(), Some(4)).unwrap().unwrap());
assert_eq!(b4.hash(), client.best_containing(b3.hash(), Some(4)).unwrap().unwrap());
assert_eq!(b4.hash(), client.best_containing(b4.hash(), Some(4)).unwrap().unwrap());
assert_eq!(c3.hash(), client.best_containing(c3.hash(), Some(4)).unwrap().unwrap());
assert_eq!(d2.hash(), client.best_containing(d2.hash(), Some(4)).unwrap().unwrap());
// search only blocks with number <= 3
assert_eq!(a3.hash(), client.best_containing(genesis_hash, Some(3)).unwrap().unwrap());
assert_eq!(a3.hash(), client.best_containing(a1.hash(), Some(3)).unwrap().unwrap());
assert_eq!(a3.hash(), client.best_containing(a2.hash(), Some(3)).unwrap().unwrap());
assert_eq!(a3.hash(), client.best_containing(a3.hash(), Some(3)).unwrap().unwrap());
assert_eq!(None, client.best_containing(a4.hash(), Some(3)).unwrap());
assert_eq!(None, client.best_containing(a5.hash(), Some(3)).unwrap());
assert_eq!(b3.hash(), client.best_containing(b2.hash(), Some(3)).unwrap().unwrap());
assert_eq!(b3.hash(), client.best_containing(b3.hash(), Some(3)).unwrap().unwrap());
assert_eq!(None, client.best_containing(b4.hash(), Some(3)).unwrap());
assert_eq!(c3.hash(), client.best_containing(c3.hash(), Some(3)).unwrap().unwrap());
assert_eq!(d2.hash(), client.best_containing(d2.hash(), Some(3)).unwrap().unwrap());
// search only blocks with number <= 2
assert_eq!(a2.hash(), client.best_containing(genesis_hash, Some(2)).unwrap().unwrap());
assert_eq!(a2.hash(), client.best_containing(a1.hash(), Some(2)).unwrap().unwrap());
assert_eq!(a2.hash(), client.best_containing(a2.hash(), Some(2)).unwrap().unwrap());
assert_eq!(None, client.best_containing(a3.hash(), Some(2)).unwrap());
assert_eq!(None, client.best_containing(a4.hash(), Some(2)).unwrap());
assert_eq!(None, client.best_containing(a5.hash(), Some(2)).unwrap());
assert_eq!(b2.hash(), client.best_containing(b2.hash(), Some(2)).unwrap().unwrap());
assert_eq!(None, client.best_containing(b3.hash(), Some(2)).unwrap());
assert_eq!(None, client.best_containing(b4.hash(), Some(2)).unwrap());
assert_eq!(None, client.best_containing(c3.hash(), Some(2)).unwrap());
assert_eq!(d2.hash(), client.best_containing(d2.hash(), Some(2)).unwrap().unwrap());
// search only blocks with number <= 1
assert_eq!(a1.hash(), client.best_containing(genesis_hash, Some(1)).unwrap().unwrap());
assert_eq!(a1.hash(), client.best_containing(a1.hash(), Some(1)).unwrap().unwrap());
assert_eq!(None, client.best_containing(a2.hash(), Some(1)).unwrap());
assert_eq!(None, client.best_containing(a3.hash(), Some(1)).unwrap());
assert_eq!(None, client.best_containing(a4.hash(), Some(1)).unwrap());
assert_eq!(None, client.best_containing(a5.hash(), Some(1)).unwrap());
assert_eq!(None, client.best_containing(b2.hash(), Some(1)).unwrap());
assert_eq!(None, client.best_containing(b3.hash(), Some(1)).unwrap());
assert_eq!(None, client.best_containing(b4.hash(), Some(1)).unwrap());
assert_eq!(None, client.best_containing(c3.hash(), Some(1)).unwrap());
assert_eq!(None, client.best_containing(d2.hash(), Some(1)).unwrap());
// search only blocks with number <= 0
assert_eq!(genesis_hash, client.best_containing(genesis_hash, Some(0)).unwrap().unwrap());
assert_eq!(None, client.best_containing(a1.hash(), Some(0)).unwrap());
assert_eq!(None, client.best_containing(a2.hash(), Some(0)).unwrap());
assert_eq!(None, client.best_containing(a3.hash(), Some(0)).unwrap());
assert_eq!(None, client.best_containing(a4.hash(), Some(0)).unwrap());
assert_eq!(None, client.best_containing(a5.hash(), Some(0)).unwrap());
assert_eq!(None, client.best_containing(b2.hash(), Some(0)).unwrap());
assert_eq!(None, client.best_containing(b3.hash(), Some(0)).unwrap());
assert_eq!(None, client.best_containing(b4.hash(), Some(0)).unwrap());
assert_eq!(None, client.best_containing(c3.hash().clone(), Some(0)).unwrap());
assert_eq!(None, client.best_containing(d2.hash().clone(), Some(0)).unwrap());
}
}
+72 -6
View File
@@ -32,6 +32,7 @@ use state_machine::backend::{Backend as StateBackend, InMemory};
use state_machine::InMemoryChangesTrieStorage;
use hash_db::Hasher;
use heapsize::HeapSizeOf;
use leaves::LeafSet;
use trie::MemoryDB;
struct PendingBlock<B: BlockT> {
@@ -69,7 +70,7 @@ impl<B: BlockT> StoredBlock<B> {
fn extrinsics(&self) -> Option<&[B::Extrinsic]> {
match *self {
StoredBlock::Header(_, _) => None,
StoredBlock::Full(ref b, _) => Some(b.extrinsics())
StoredBlock::Full(ref b, _) => Some(b.extrinsics()),
}
}
@@ -93,6 +94,7 @@ struct BlockchainStorage<Block: BlockT> {
finalized_hash: Block::Hash,
genesis_hash: Block::Hash,
cht_roots: HashMap<NumberFor<Block>, Block::Hash>,
leaves: LeafSet<Block::Hash, NumberFor<Block>>,
}
/// In-memory blockchain. Supports concurrent reads.
@@ -139,6 +141,7 @@ impl<Block: BlockT> Blockchain<Block> {
finalized_hash: Default::default(),
genesis_hash: Default::default(),
cht_roots: HashMap::new(),
leaves: LeafSet::new(),
}));
Blockchain {
storage: storage.clone(),
@@ -157,16 +160,50 @@ impl<Block: BlockT> Blockchain<Block> {
justification: Option<Justification<Block::Hash>>,
body: Option<Vec<<Block as BlockT>::Extrinsic>>,
new_state: NewBlockState,
) {
) -> ::error::Result<()> {
let number = header.number().clone();
let best_tree_route = match new_state.is_best() {
false => None,
true => {
let best_hash = self.storage.read().best_hash;
if &best_hash == header.parent_hash() {
None
} else {
let route = ::blockchain::tree_route(
self,
BlockId::Hash(best_hash),
BlockId::Hash(*header.parent_hash()),
)?;
Some(route)
}
}
};
let mut storage = self.storage.write();
storage.blocks.insert(hash.clone(), StoredBlock::new(header, body, justification));
storage.hashes.insert(number, hash.clone());
storage.leaves.import(hash.clone(), number.clone(), header.parent_hash().clone());
if new_state.is_best() {
if let Some(tree_route) = best_tree_route {
// apply retraction and enaction when reorganizing up to parent hash
let enacted = tree_route.enacted();
for entry in enacted {
storage.hashes.insert(entry.number, entry.hash);
}
for entry in tree_route.retracted().iter().skip(enacted.len()) {
storage.hashes.remove(&entry.number);
}
}
storage.best_hash = hash.clone();
storage.best_number = number.clone();
storage.hashes.insert(number, hash.clone());
}
storage.blocks.insert(hash.clone(), StoredBlock::new(header, body, justification));
if let NewBlockState::Final = new_state {
storage.finalized_hash = hash;
}
@@ -174,6 +211,8 @@ impl<Block: BlockT> Blockchain<Block> {
if number == Zero::zero() {
storage.genesis_hash = hash;
}
Ok(())
}
/// Compare this blockchain with another in-mem blockchain
@@ -262,6 +301,10 @@ impl<Block: BlockT> blockchain::Backend<Block> for Blockchain<Block> {
fn cache(&self) -> Option<&blockchain::Cache<Block>> {
	// The in-memory blockchain always carries a cache (used elsewhere in this
	// file to store authorities per parent hash), so this never returns `None`.
	Some(&self.cache)
}
/// Return the hashes of all current leaf blocks, as tracked by the
/// in-memory `LeafSet`, ordered by block number descending.
fn leaves(&self) -> error::Result<Vec<Block::Hash>> {
	// Take a read lock only long enough to snapshot the leaf hashes.
	let storage = self.storage.read();
	Ok(storage.leaves.hashes())
}
}
impl<Block: BlockT> light::blockchain::Storage<Block> for Blockchain<Block>
@@ -276,7 +319,7 @@ impl<Block: BlockT> light::blockchain::Storage<Block> for Blockchain<Block>
) -> error::Result<()> {
let hash = header.hash();
let parent_hash = *header.parent_hash();
self.insert(hash, header, None, None, state);
self.insert(hash, header, None, None, state)?;
if state.is_best() {
self.cache.insert(parent_hash, authorities);
}
@@ -436,7 +479,7 @@ where
}
}
self.blockchain.insert(hash, header, justification, body, pending_block.state);
self.blockchain.insert(hash, header, justification, body, pending_block.state)?;
// dumb implementation - store value for each block
if pending_block.state.is_best() {
self.blockchain.cache.insert(parent_hash, operation.pending_authorities);
@@ -501,3 +544,26 @@ pub fn cache_authorities_at<Block: BlockT>(
) {
blockchain.cache.insert(at, authorities);
}
#[cfg(test)]
mod tests {
	use std::sync::Arc;
	use test_client;
	use primitives::Blake2Hasher;

	// Convenience alias: the in-memory backend instantiated with the test runtime.
	type TestBackend = test_client::client::in_mem::Backend<test_client::runtime::Block, Blake2Hasher>;

	#[test]
	fn test_leaves_with_complex_block_tree() {
		// Delegate to the shared trait test so every backend implementation
		// is exercised against the same block-tree scenario.
		let backend = Arc::new(TestBackend::new());
		test_client::trait_tests::test_leaves_for_backend(backend);
	}

	#[test]
	fn test_blockchain_query_by_number_gets_canonical() {
		// Shared trait test: querying by number must resolve to the canonical chain.
		let backend = Arc::new(TestBackend::new());
		test_client::trait_tests::test_blockchain_query_by_number_gets_canonical(backend);
	}
}
+204
View File
@@ -0,0 +1,204 @@
// Copyright 2018 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
use std::collections::BTreeSet;
use std::cmp::{Ord, Ordering};
use kvdb::{KeyValueDB, DBTransaction};
use runtime_primitives::traits::SimpleArithmetic;
use codec::{Encode, Decode};
use error;
/// A single entry in the leaf set: a block hash together with its number.
/// Entries are kept in a `BTreeSet` ordered by `number` descending (see the
/// `Ord` impl below), which allows faster and simpler insertion and removal
/// than keeping them in a list.
#[derive(Debug, Clone)]
struct LeafSetItem<H, N> {
	// NOTE(review): `hash` takes no part in `Ord`/`PartialEq` — see the impls below.
	hash: H,
	number: N,
}
impl<H, N> Ord for LeafSetItem<H, N> where N: Ord {
	fn cmp(&self, other: &Self) -> Ordering {
		// reverse (descending) order: comparing `other` against `self` flips the
		// natural ordering so higher block numbers sort first. The hash is
		// deliberately not compared, so items with equal numbers compare equal.
		other.number.cmp(&self.number)
	}
}
impl<H, N> PartialOrd for LeafSetItem<H, N> where N: PartialOrd {
	fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
		// reverse (descending) order — must stay consistent with the `Ord` impl.
		other.number.partial_cmp(&self.number)
	}
}
impl<H, N> PartialEq for LeafSetItem<H, N> where N: PartialEq {
	fn eq(&self, other: &LeafSetItem<H, N>) -> bool {
		// Equality mirrors `Ord` and compares the block numbers only.
		// NOTE(review): because `hash` is ignored, a `BTreeSet` of these items can
		// hold at most one leaf per block number — two fork heads at the same
		// height would collide on insert/remove. Confirm this is intended.
		self.number == other.number
	}
}
impl<H, N> Eq for LeafSetItem<H, N> where N: PartialEq {}
/// A displaced leaf after import.
pub struct DisplacedLeaf<H, N> {
	// Hash of the newly imported block that displaced its parent from the leaf set.
	new_hash: H,
	// The parent entry that ceased to be a leaf when the child was imported.
	displaced: LeafSetItem<H, N>,
}
/// List of leaf hashes ordered by number (descending), stored in memory
/// for fast access. This allows very fast checking and modification of
/// the set of active leaves.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct LeafSet<H, N> {
	// Ordered by block number descending via `LeafSetItem`'s `Ord` impl.
	storage: BTreeSet<LeafSetItem<H, N>>,
}
impl<H, N> LeafSet<H, N> where
	H: Clone + Decode + Encode,
	N: Clone + SimpleArithmetic + Decode + Encode,
{
	/// Construct a new, blank leaf set.
	pub fn new() -> Self {
		Self {
			storage: BTreeSet::new()
		}
	}

	/// Read the leaf list from the DB, using given prefix for keys.
	///
	/// Each entry is stored under the key `prefix ++ encoded_hash`, with the
	/// encoded block number as the value (the layout written by
	/// `prepare_transaction` below).
	///
	/// # Errors
	/// Returns a `Backend` error if a stored hash or number fails to decode.
	pub fn read_from_db(db: &KeyValueDB, column: Option<u32>, prefix: &[u8]) -> error::Result<Self> {
		let mut storage = BTreeSet::new();
		for (key, value) in db.iter_from_prefix(column, prefix) {
			// Strip the prefix; the rest of the key is the encoded hash.
			let raw_hash = &mut &key[prefix.len()..];
			let hash = match Decode::decode(raw_hash) {
				Some(hash) => hash,
				None => return Err(error::ErrorKind::Backend("Error decoding hash".into()).into()),
			};
			let number = match Decode::decode(&mut &value[..]) {
				Some(number) => number,
				None => return Err(error::ErrorKind::Backend("Error decoding number".into()).into()),
			};
			storage.insert(LeafSetItem { hash, number });
		}
		Ok(Self { storage })
	}

	/// Update the leaf list on import: the imported block becomes a leaf, and
	/// its parent (if it was a leaf) is displaced. Returns the displaced leaf
	/// if there was one, so the caller can `undo` on failure.
	pub fn import(&mut self, hash: H, number: N, parent_hash: H) -> Option<DisplacedLeaf<H, N>> {
		// avoid underflow for genesis.
		let displaced = if number != N::zero() {
			let displaced = LeafSetItem {
				hash: parent_hash,
				number: number.clone() - N::one(),
			};
			// NOTE(review): `BTreeSet::remove` locates entries via `Ord`, which for
			// `LeafSetItem` compares numbers only. If two distinct leaves existed at
			// `number - 1`, this could remove one that is not the parent — confirm
			// that situation cannot arise for callers of this type.
			let was_displaced = self.storage.remove(&displaced);
			if was_displaced {
				Some(DisplacedLeaf {
					new_hash: hash.clone(),
					displaced,
				})
			} else {
				None
			}
		} else {
			None
		};
		self.storage.insert(LeafSetItem { hash, number });
		displaced
	}

	/// Undo an import operation, with a displaced leaf: removes the entry that
	/// was added for the new block and restores the displaced parent leaf.
	pub fn undo(&mut self, displaced: DisplacedLeaf<H, N>) {
		// The new block sits one above the displaced parent.
		let new_number = displaced.displaced.number.clone() + N::one();
		self.storage.remove(&LeafSetItem { hash: displaced.new_hash, number: new_number });
		self.storage.insert(displaced.displaced);
	}

	/// Currently, since revert only affects the canonical chain, we assume the
	/// parent has no further children, so we remove the reverted block and add
	/// its parent back as a leaf.
	pub fn revert(&mut self, hash: H, number: N, parent_hash: H) {
		self.storage.insert(LeafSetItem {
			hash: parent_hash,
			number: number.clone() - N::one(),
		});
		self.storage.remove(&LeafSetItem { hash, number });
	}

	/// Returns a `Vec` of all hashes in the leaf set,
	/// ordered by their block number descending.
	pub fn hashes(&self) -> Vec<H> {
		self.storage.iter().map(|item| item.hash.clone()).collect()
	}

	/// Write the leaf list to the database transaction: one entry per leaf,
	/// keyed by `prefix ++ encoded_hash` with the encoded number as the value
	/// (the inverse of `read_from_db`).
	pub fn prepare_transaction(&self, tx: &mut DBTransaction, column: Option<u32>, prefix: &[u8]) {
		let mut buf = prefix.to_vec();
		for &LeafSetItem { ref hash, ref number } in &self.storage {
			hash.using_encoded(|s| buf.extend(s));
			tx.put_vec(column, &buf[..], number.encode());
			buf.truncate(prefix.len()); // reuse allocation.
		}
	}
}
#[cfg(test)]
mod tests {
	use super::*;

	#[test]
	fn it_works() {
		let mut set = LeafSet::new();
		// Hashes are plain integers here; `1_1` is just the literal 11, read as
		// "block 1 on fork 1". Chain built: 0 -> 1_1 -> 2_1 -> 3_1.
		set.import(0u32, 0u32, 0u32);
		set.import(1_1, 1, 0);
		set.import(2_1, 2, 1_1);
		set.import(3_1, 3, 2_1);

		// Only the tip is a leaf — each import displaced its parent.
		assert!(set.storage.contains(&LeafSetItem { hash: 3_1, number: 3 }));
		assert!(!set.storage.contains(&LeafSetItem { hash: 2_1, number: 2 }));
		assert!(!set.storage.contains(&LeafSetItem { hash: 1_1, number: 1 }));
		assert!(!set.storage.contains(&LeafSetItem { hash: 0, number: 0 }));

		// Fork off 1_1 with a sibling at height 2; both tips are now leaves.
		// NOTE(review): `contains` compares by number only (see `Ord` on
		// `LeafSetItem`), so these asserts cannot distinguish hashes at the
		// same height.
		set.import(2_2, 2, 1_1);

		assert!(set.storage.contains(&LeafSetItem { hash: 3_1, number: 3 }));
		assert!(set.storage.contains(&LeafSetItem { hash: 2_2, number: 2 }));
	}

	#[test]
	fn flush_to_disk() {
		const PREFIX: &[u8] = b"abcdefg";
		let db = ::kvdb_memorydb::create(0);

		let mut set = LeafSet::new();
		set.import(0u32, 0u32, 0u32);
		set.import(1_1, 1, 0);
		set.import(2_1, 2, 1_1);
		set.import(3_1, 3, 2_1);

		// Persist the set and read it back; the round trip must be lossless.
		let mut tx = DBTransaction::new();
		set.prepare_transaction(&mut tx, None, PREFIX);
		db.write(tx).unwrap();

		let set2 = LeafSet::read_from_db(&db, None, PREFIX).unwrap();
		assert_eq!(set, set2);
	}
}
+5
View File
@@ -40,11 +40,13 @@ extern crate hash_db;
extern crate rlp;
extern crate heapsize;
extern crate memory_db;
extern crate kvdb;
#[macro_use] extern crate error_chain;
#[macro_use] extern crate log;
#[cfg_attr(test, macro_use)] extern crate substrate_executor as executor;
#[cfg(test)] #[macro_use] extern crate hex_literal;
#[cfg(test)] extern crate kvdb_memorydb;
pub mod error;
pub mod blockchain;
@@ -54,6 +56,7 @@ pub mod in_mem;
pub mod genesis;
pub mod block_builder;
pub mod light;
mod leaves;
mod call_executor;
mod client;
mod notifications;
@@ -61,9 +64,11 @@ mod notifications;
pub use blockchain::Info as ChainInfo;
pub use call_executor::{CallResult, CallExecutor, LocalCallExecutor};
pub use client::{
new_with_backend,
new_in_mem,
BlockBody, BlockStatus, BlockOrigin, ImportNotifications, FinalityNotifications, BlockchainEvents,
Client, ClientInfo, ChainHead, ImportResult, JustifiedHeader,
};
pub use notifications::{StorageEventStream, StorageChangeSet};
pub use state_machine::ExecutionStrategy;
pub use leaves::LeafSet;
@@ -150,4 +150,8 @@ impl<S, F, Block> BlockchainBackend<Block> for Blockchain<S, F> where Block: Blo
fn cache(&self) -> Option<&BlockchainCache<Block>> {
self.storage.cache()
}
fn leaves(&self) -> ClientResult<Vec<Block::Hash>> {
	// Leaf tracking is not implemented for the light-client blockchain yet;
	// this stub exists to satisfy the `BlockchainBackend` trait.
	unimplemented!()
}
}
@@ -21,7 +21,6 @@ use client;
use keyring;
use runtime;
use {Backend, Executor};
use primitives::{Blake2Hasher};
/// Extension trait for test block builder.
@@ -30,7 +29,11 @@ pub trait BlockBuilderExt {
fn push_transfer(&mut self, transfer: runtime::Transfer) -> Result<(), client::error::Error>;
}
impl BlockBuilderExt for client::block_builder::BlockBuilder<Backend, Executor, runtime::Block, Blake2Hasher> {
impl<B, E> BlockBuilderExt for client::block_builder::BlockBuilder<B, E, runtime::Block, Blake2Hasher>
where
B: client::backend::Backend<runtime::Block, Blake2Hasher>,
E: client::CallExecutor<runtime::Block, Blake2Hasher> + Clone,
{
fn push_transfer(&mut self, transfer: runtime::Transfer) -> Result<(), client::error::Error> {
self.push(sign_tx(transfer))
}
+7 -27
View File
@@ -18,18 +18,13 @@
use client::{self, Client};
use keyring::Keyring;
use runtime_primitives::{generic::BlockId, StorageMap};
use runtime::genesismap::{GenesisConfig, additional_storage_with_genesis};
use executor::NativeExecutor;
use runtime_primitives::generic::BlockId;
use primitives::Blake2Hasher;
use runtime;
use bft;
use {Backend, Executor};
/// Extension trait for a test client.
pub trait TestClient {
/// Crates new client instance for tests.
fn new_for_tests() -> Self;
/// Justify and import block to the chain. No finality.
fn justify_and_import(&self, origin: client::BlockOrigin, block: runtime::Block) -> client::error::Result<()>;
@@ -40,11 +35,11 @@ pub trait TestClient {
fn genesis_hash(&self) -> runtime::Hash;
}
impl TestClient for Client<Backend, Executor, runtime::Block> {
fn new_for_tests() -> Self {
client::new_in_mem(NativeExecutor::new(), genesis_storage()).unwrap()
}
impl<B, E> TestClient for Client<B, E, runtime::Block>
where
B: client::backend::Backend<runtime::Block, Blake2Hasher>,
E: client::CallExecutor<runtime::Block, Blake2Hasher>
{
fn justify_and_import(&self, origin: client::BlockOrigin, block: runtime::Block) -> client::error::Result<()> {
let justification = fake_justify(&block.header);
let justified = self.check_justification(block.header, justification)?;
@@ -93,18 +88,3 @@ fn fake_justify(header: &runtime::Header) -> bft::UncheckedJustification<runtime
1,
)
}
fn genesis_config() -> GenesisConfig {
GenesisConfig::new_simple(vec![
Keyring::Alice.to_raw_public().into(),
Keyring::Bob.to_raw_public().into(),
Keyring::Charlie.to_raw_public().into(),
], 1000)
}
fn genesis_storage() -> StorageMap {
let mut storage = genesis_config().genesis_map();
let block: runtime::Block = client::genesis::construct_genesis_block(&storage);
storage.extend(additional_storage_with_genesis(&block));
storage
}
+37 -3
View File
@@ -33,13 +33,22 @@ pub extern crate substrate_client as client;
pub extern crate substrate_keyring as keyring;
pub extern crate substrate_test_runtime as runtime;
mod client_ext;
pub mod client_ext;
pub mod trait_tests;
mod block_builder_ext;
use std::sync::Arc;
pub use client_ext::TestClient;
pub use block_builder_ext::BlockBuilderExt;
pub use client::blockchain;
pub use client::backend;
pub use executor::NativeExecutor;
use primitives::{Blake2Hasher};
use primitives::Blake2Hasher;
use runtime_primitives::StorageMap;
use runtime::genesismap::{GenesisConfig, additional_storage_with_genesis};
use keyring::Keyring;
mod local_executor {
#![allow(missing_docs)]
@@ -62,5 +71,30 @@ pub type Executor = client::LocalCallExecutor<
/// Creates new client instance used for tests.
pub fn new() -> client::Client<Backend, Executor, runtime::Block> {
TestClient::new_for_tests()
new_with_backend(Arc::new(Backend::new()))
}
/// Creates new client instance used for tests with an explicitly provided backend.
/// This is useful for testing backend implementations.
pub fn new_with_backend<B>(backend: Arc<B>) -> client::Client<B, client::LocalCallExecutor<B, executor::NativeExecutor<LocalExecutor>>, runtime::Block>
	where
		B: backend::LocalBackend<runtime::Block, Blake2Hasher>,
{
	// Pair the given backend with the native test executor and the standard
	// test genesis storage.
	let executor = NativeExecutor::new();
	client::new_with_backend(backend, executor, genesis_storage()).unwrap()
}
/// Genesis configuration for the test client: the three well-known test
/// authorities (Alice, Bob, Charlie), with a balance of 1000.
fn genesis_config() -> GenesisConfig {
	let authorities = [Keyring::Alice, Keyring::Bob, Keyring::Charlie]
		.iter()
		.map(|keyring| keyring.to_raw_public().into())
		.collect();
	GenesisConfig::new_simple(authorities, 1000)
}
/// Builds the genesis storage map for the test client: the plain genesis
/// config map plus the entries derived from the constructed genesis block.
fn genesis_storage() -> StorageMap {
	let mut map = genesis_config().genesis_map();
	let genesis_block: runtime::Block = client::genesis::construct_genesis_block(&map);
	map.extend(additional_storage_with_genesis(&genesis_block));
	map
}
@@ -0,0 +1,241 @@
// Copyright 2018 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! tests that should hold for all implementations of certain traits.
//! to test implementations without duplication.
#![allow(missing_docs)]
use std::sync::Arc;
use keyring::Keyring;
use client::BlockOrigin;
use primitives::Blake2Hasher;
use ::TestClient;
use runtime_primitives::traits::Block as BlockT;
use backend;
use blockchain::{Backend as BlockChainBackendT, HeaderBackend};
use ::BlockBuilderExt;
use runtime::{self, Transfer};
use runtime_primitives::generic::BlockId;
/// helper to test the `leaves` implementation for various backends
pub fn test_leaves_for_backend<B>(backend: Arc<B>) where
B: backend::LocalBackend<runtime::Block, Blake2Hasher>,
{
// block tree:
// G -> A1 -> A2 -> A3 -> A4 -> A5
// A1 -> B2 -> B3 -> B4
// B2 -> C3
// A1 -> D2
let client = ::new_with_backend(backend.clone());
let genesis_hash = client.info().unwrap().chain.genesis_hash;
assert_eq!(
client.backend().blockchain().leaves().unwrap(),
vec![genesis_hash]);
// G -> A1
let a1 = client.new_block().unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a1.clone()).unwrap();
assert_eq!(
backend.blockchain().leaves().unwrap(),
vec![a1.hash()]);
// A1 -> A2
let a2 = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a2.clone()).unwrap();
assert_eq!(
client.backend().blockchain().leaves().unwrap(),
vec![a2.hash()]);
// A2 -> A3
let a3 = client.new_block_at(&BlockId::Hash(a2.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a3.clone()).unwrap();
assert_eq!(
backend.blockchain().leaves().unwrap(),
vec![a3.hash()]);
// A3 -> A4
let a4 = client.new_block_at(&BlockId::Hash(a3.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a4.clone()).unwrap();
assert_eq!(
backend.blockchain().leaves().unwrap(),
vec![a4.hash()]);
// A4 -> A5
let a5 = client.new_block_at(&BlockId::Hash(a4.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, a5.clone()).unwrap();
assert_eq!(
backend.blockchain().leaves().unwrap(),
vec![a5.hash()]);
// A1 -> B2
let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap();
// this push is required as otherwise B2 has the same hash as A2 and won't get imported
builder.push_transfer(Transfer {
from: Keyring::Alice.to_raw_public().into(),
to: Keyring::Ferdie.to_raw_public().into(),
amount: 41,
nonce: 0,
}).unwrap();
let b2 = builder.bake().unwrap();
client.justify_and_import(BlockOrigin::Own, b2.clone()).unwrap();
assert_eq!(
backend.blockchain().leaves().unwrap(),
vec![a5.hash(), b2.hash()]);
// B2 -> B3
let b3 = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, b3.clone()).unwrap();
assert_eq!(
backend.blockchain().leaves().unwrap(),
vec![a5.hash(), b3.hash()]);
// B3 -> B4
let b4 = client.new_block_at(&BlockId::Hash(b3.hash())).unwrap().bake().unwrap();
client.justify_and_import(BlockOrigin::Own, b4.clone()).unwrap();
assert_eq!(
backend.blockchain().leaves().unwrap(),
vec![a5.hash(), b4.hash()]);
// // B2 -> C3
let mut builder = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap();
// this push is required as otherwise C3 has the same hash as B3 and won't get imported
builder.push_transfer(Transfer {
from: Keyring::Alice.to_raw_public().into(),
to: Keyring::Ferdie.to_raw_public().into(),
amount: 1,
nonce: 1,
}).unwrap();
let c3 = builder.bake().unwrap();
client.justify_and_import(BlockOrigin::Own, c3.clone()).unwrap();
assert_eq!(
backend.blockchain().leaves().unwrap(),
vec![a5.hash(), b4.hash(), c3.hash()]);
// A1 -> D2
let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap();
// this push is required as otherwise D2 has the same hash as B2 and won't get imported
builder.push_transfer(Transfer {
from: Keyring::Alice.to_raw_public().into(),
to: Keyring::Ferdie.to_raw_public().into(),
amount: 1,
nonce: 0,
}).unwrap();
let d2 = builder.bake().unwrap();
client.justify_and_import(BlockOrigin::Own, d2.clone()).unwrap();
assert_eq!(
backend.blockchain().leaves().unwrap(),
vec![a5.hash(), b4.hash(), c3.hash(), d2.hash()]);
}
/// Builds the block tree below and then verifies that querying headers and
/// hashes by block number resolves to the canonical (A) chain, never to one
/// of the forks imported afterwards.
pub fn test_blockchain_query_by_number_gets_canonical<B>(backend: Arc<B>) where
	B: backend::LocalBackend<runtime::Block, Blake2Hasher>,
{
	// block tree:
	// G -> A1 -> A2 -> A3 -> A4 -> A5
	//      A1 -> B2 -> B3 -> B4
	//            B2 -> C3
	//      A1 -> D2

	let client = ::new_with_backend(backend);

	// G -> A1
	let a1 = client.new_block().unwrap().bake().unwrap();
	client.justify_and_import(BlockOrigin::Own, a1.clone()).unwrap();

	// A1 -> A2
	let a2 = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap().bake().unwrap();
	client.justify_and_import(BlockOrigin::Own, a2.clone()).unwrap();

	// A2 -> A3
	let a3 = client.new_block_at(&BlockId::Hash(a2.hash())).unwrap().bake().unwrap();
	client.justify_and_import(BlockOrigin::Own, a3.clone()).unwrap();

	// A3 -> A4
	let a4 = client.new_block_at(&BlockId::Hash(a3.hash())).unwrap().bake().unwrap();
	client.justify_and_import(BlockOrigin::Own, a4.clone()).unwrap();

	// A4 -> A5
	let a5 = client.new_block_at(&BlockId::Hash(a4.hash())).unwrap().bake().unwrap();
	client.justify_and_import(BlockOrigin::Own, a5.clone()).unwrap();

	// A1 -> B2
	let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap();
	// this push is required as otherwise B2 has the same hash as A2 and won't get imported
	builder.push_transfer(Transfer {
		from: Keyring::Alice.to_raw_public().into(),
		to: Keyring::Ferdie.to_raw_public().into(),
		amount: 41,
		nonce: 0,
	}).unwrap();
	let b2 = builder.bake().unwrap();
	client.justify_and_import(BlockOrigin::Own, b2.clone()).unwrap();

	// B2 -> B3
	let b3 = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap().bake().unwrap();
	client.justify_and_import(BlockOrigin::Own, b3.clone()).unwrap();

	// B3 -> B4
	let b4 = client.new_block_at(&BlockId::Hash(b3.hash())).unwrap().bake().unwrap();
	client.justify_and_import(BlockOrigin::Own, b4.clone()).unwrap();

	// B2 -> C3
	let mut builder = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap();
	// this push is required as otherwise C3 has the same hash as B3 and won't get imported
	builder.push_transfer(Transfer {
		from: Keyring::Alice.to_raw_public().into(),
		to: Keyring::Ferdie.to_raw_public().into(),
		amount: 1,
		nonce: 1,
	}).unwrap();
	let c3 = builder.bake().unwrap();
	client.justify_and_import(BlockOrigin::Own, c3.clone()).unwrap();

	// A1 -> D2
	let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap();
	// this push is required as otherwise D2 has the same hash as B2 and won't get imported
	builder.push_transfer(Transfer {
		from: Keyring::Alice.to_raw_public().into(),
		to: Keyring::Ferdie.to_raw_public().into(),
		amount: 1,
		nonce: 0,
	}).unwrap();
	let d2 = builder.bake().unwrap();
	client.justify_and_import(BlockOrigin::Own, d2.clone()).unwrap();

	// Numbers 0..=5 must all resolve to the canonical chain G, A1..A5 —
	// both via `header(BlockId::Number(..))` and via `hash(..)`.
	let genesis_hash = client.info().unwrap().chain.genesis_hash;

	assert_eq!(client.backend().blockchain().header(BlockId::Number(0)).unwrap().unwrap().hash(), genesis_hash);
	assert_eq!(client.backend().blockchain().hash(0).unwrap().unwrap(), genesis_hash);

	assert_eq!(client.backend().blockchain().header(BlockId::Number(1)).unwrap().unwrap().hash(), a1.hash());
	assert_eq!(client.backend().blockchain().hash(1).unwrap().unwrap(), a1.hash());

	assert_eq!(client.backend().blockchain().header(BlockId::Number(2)).unwrap().unwrap().hash(), a2.hash());
	assert_eq!(client.backend().blockchain().hash(2).unwrap().unwrap(), a2.hash());

	assert_eq!(client.backend().blockchain().header(BlockId::Number(3)).unwrap().unwrap().hash(), a3.hash());
	assert_eq!(client.backend().blockchain().hash(3).unwrap().unwrap(), a3.hash());

	assert_eq!(client.backend().blockchain().header(BlockId::Number(4)).unwrap().unwrap().hash(), a4.hash());
	assert_eq!(client.backend().blockchain().hash(4).unwrap().unwrap(), a4.hash());

	assert_eq!(client.backend().blockchain().header(BlockId::Number(5)).unwrap().unwrap().hash(), a5.hash());
	assert_eq!(client.backend().blockchain().hash(5).unwrap().unwrap(), a5.hash());
}