mirror of
https://github.com/pezkuwichain/pezkuwi-subxt.git
synced 2026-05-01 10:07:56 +00:00
best_containing operations (issue 603) (#740)
* add stub for Client.best_chain_containing_block_hash * add fn blockchain::Backend::leaf_hashes * fix typo * sketch out Client.best_chain_containing_block_hash * fix indent * Blockchain.leaf_hashes -> Blockchain.leaves * add unimplemented! stub impls for Blockchain.leaves * start impl of Blockchain.leaves for in-memory client db * Client.best_chain_containing...: check canonical first and make compile * first rough attempt at maintaining leaf list in in-memory db * fix tab indent * add test best_chain_containing_single_block * add failing test best_chain_containing_with_fork * pub use client::blockchain; in test-client to prevent circular dep in client tests * best_chain_containing_with_single_block: improve and test leaves * far improve in-memory Backend::leaves impl * test blockchain::Backend::leaves more thoroughly * handle more edge cases in blockchain::Backend::leaves impl for in memory * fix test best_chain_containing_with_fork (two distinct test blocks had same hash) * make best_chain_containing_block_hash pass existing tests * improve docstring for Blockchain::leaves * Client.best_chain_containing: some cleanup. support max_block_number * best_chain_containing: remove broken outcommented fast check for best = canonical * remove blank line * best_block_containing: return None if target_hash not found * best_chain_containing: add unreachable! 
at end of function * improve tests for best_chain_containing * renames * more elaborate test scenario for best_containing * best_containing: fix restriction of search through maybe_max_number * best_containing: tests for restriction of search * get rid of unnecessary clones * replace Client::new_in_mem by new_with_backend which is useful for testing backends * add test_client::new_with_backend for testing different backend impls * add test for in_mem::Backend::leaves * remove unused imports * in_mem test_leaves: simplify * flesh out tests for in_mem leaves impl * remove tests for leaves from client which are now covered in implementing module * improve comment * add Client.new_in_mem again * unwrap in test_client::new_with_backend * make test_client::BlockBuilderExt work not just with in-mem backend * make test client ext not just work with in mem backend * add failing Backend.leaves test for client-db * update Cargo.lock * replace KeccakHasher with Blake2Hasher * refactor address grumble https://github.com/paritytech/substrate/pull/740#discussion_r217822862 * refactor using NumberFor address grumble https://github.com/paritytech/substrate/pull/740#discussion_r217823341 * add test that exposes possible problem * update docstring for Client.best_containing * extract test for Backend.leaves for reuse * improve test blockchain_header_and_hash_return_blocks_from_canonical_chain_given_block_numbers * extract test_blockchain_query_by_number_gets_canonical to easily test multiple impls * remove whitespace * remove todo * Client.best_containing: pre-empt search loop when target in canonical * best_containing: prevent race condition by holding import lock * add todo * extract leaf list update code into function * add comment * client-db: use in-memory-kvdb for tests * use BTreeSet to store leaves for in-mem which is faster and simpler * add docstring * add comments and fix formatting * add initial raw version of LeafSet * remove Client::update_leaves which has been 
superseded by LeafSet * use LeafSet in in-mem backend * keccak -> blake2 * don't reexport codec traits in primitives addresses https://github.com/paritytech/substrate/pull/740#discussion_r219538185 * fix rebase mistake * improve LeafSet and use it in state-db * correct Transfer nonces to fix ApplyExtinsicFailed(Stale) * use given backend in canonical test * kill dead tree-route code in util * fix warnings * tests for leafset * reorganizations in in_mem backend * fix reorganization canon block logic * DB commit and safe reversion on write error * fix style nits
This commit is contained in:
@@ -53,6 +53,11 @@ pub trait Backend<Block: BlockT>: HeaderBackend<Block> {
|
||||
|
||||
/// Returns data cache reference, if it is enabled on this backend.
|
||||
fn cache(&self) -> Option<&Cache<Block>>;
|
||||
|
||||
/// Returns hashes of all blocks that are leaves of the block tree,
/// in other words blocks that have no children and are chain heads.
/// Results must be ordered best (longest, highest) chain first.
fn leaves(&self) -> Result<Vec<Block::Hash>>;
|
||||
}
|
||||
|
||||
/// Blockchain optional data cache.
|
||||
|
||||
@@ -57,7 +57,7 @@ pub struct Client<B, E, Block> where Block: BlockT {
|
||||
execution_strategy: ExecutionStrategy,
|
||||
}
|
||||
|
||||
/// A source of blockchain evenets.
|
||||
/// A source of blockchain events.
|
||||
pub trait BlockchainEvents<Block: BlockT> {
|
||||
/// Get block import event stream. Not guaranteed to be fired for every
|
||||
/// imported block.
|
||||
@@ -190,9 +190,25 @@ pub fn new_in_mem<E, Block, S>(
|
||||
Block: BlockT,
|
||||
H256: From<Block::Hash>,
|
||||
{
|
||||
let backend = Arc::new(in_mem::Backend::new());
|
||||
let executor = LocalCallExecutor::new(backend.clone(), executor);
|
||||
Client::new(backend, executor, genesis_storage, ExecutionStrategy::NativeWhenPossible)
|
||||
new_with_backend(Arc::new(in_mem::Backend::new()), executor, genesis_storage)
|
||||
}
|
||||
|
||||
/// Create a client with the explicitly provided backend.
/// This is useful for testing backend implementations.
pub fn new_with_backend<B, E, Block, S>(
	backend: Arc<B>,
	executor: E,
	build_genesis_storage: S,
) -> error::Result<Client<B, LocalCallExecutor<B, E>, Block>>
	where
		E: CodeExecutor<Blake2Hasher> + RuntimeInfo,
		S: BuildStorage,
		Block: BlockT,
		H256: From<Block::Hash>,
		B: backend::LocalBackend<Block, Blake2Hasher>
{
	// wrap the backend in a local call executor; the backend Arc is shared
	// between the executor and the client itself.
	let call_executor = LocalCallExecutor::new(backend.clone(), executor);
	Client::new(backend, call_executor, build_genesis_storage, ExecutionStrategy::NativeWhenPossible)
}
|
||||
|
||||
impl<B, E, Block> Client<B, E, Block> where
|
||||
@@ -723,6 +739,105 @@ impl<B, E, Block> Client<B, E, Block> where
|
||||
let info = self.backend.blockchain().info().map_err(|e| error::Error::from_blockchain(Box::new(e)))?;
|
||||
Ok(self.header(&BlockId::Hash(info.best_hash))?.expect("Best block header must always exist"))
|
||||
}
|
||||
|
||||
/// Get the most recent block hash of the best (longest) chains
|
||||
/// that contain block with the given `target_hash`.
|
||||
/// If `maybe_max_block_number` is `Some(max_block_number)`
|
||||
/// the search is limited to block `numbers <= max_block_number`.
|
||||
/// in other words as if there were no blocks greater `max_block_number`.
|
||||
/// TODO [snd] possibly implement this on blockchain::Backend and just redirect here
|
||||
/// Returns `Ok(None)` if `target_hash` is not found in search space.
|
||||
/// TODO [snd] write down time complexity
|
||||
pub fn best_containing(&self, target_hash: Block::Hash, maybe_max_number: Option<NumberFor<Block>>) -> error::Result<Option<Block::Hash>> {
|
||||
let target_header = {
|
||||
match self.backend.blockchain().header(BlockId::Hash(target_hash))? {
|
||||
Some(x) => x,
|
||||
// target not in blockchain
|
||||
None => { return Ok(None); },
|
||||
}
|
||||
};
|
||||
|
||||
if let Some(max_number) = maybe_max_number {
|
||||
// target outside search range
|
||||
if target_header.number() > &max_number {
|
||||
return Ok(None);
|
||||
}
|
||||
}
|
||||
|
||||
let (leaves, best_already_checked) = {
|
||||
// ensure no blocks are imported during this code block.
|
||||
// an import could trigger a reorg which could change the canonical chain.
|
||||
// we depend on the canonical chain staying the same during this code block.
|
||||
let _import_lock = self.import_lock.lock();
|
||||
|
||||
let info = self.backend.blockchain().info()?;
|
||||
|
||||
let canon_hash = self.backend.blockchain().hash(*target_header.number())?
|
||||
.ok_or_else(|| error::Error::from(format!("failed to get hash for block number {}", target_header.number())))?;
|
||||
|
||||
if canon_hash == target_hash {
|
||||
if let Some(max_number) = maybe_max_number {
|
||||
// something has to guarantee that max_number is in chain
|
||||
return Ok(Some(self.backend.blockchain().hash(max_number)?.ok_or_else(|| error::Error::from(format!("failed to get hash for block number {}", max_number)))?));
|
||||
} else {
|
||||
return Ok(Some(info.best_hash));
|
||||
}
|
||||
}
|
||||
(self.backend.blockchain().leaves()?, info.best_hash)
|
||||
};
|
||||
|
||||
// for each chain. longest chain first. shortest last
|
||||
for leaf_hash in leaves {
|
||||
// ignore canonical chain which we already checked above
|
||||
if leaf_hash == best_already_checked {
|
||||
continue;
|
||||
}
|
||||
|
||||
// start at the leaf
|
||||
let mut current_hash = leaf_hash;
|
||||
|
||||
// if search is not restricted then the leaf is the best
|
||||
let mut best_hash = leaf_hash;
|
||||
|
||||
// go backwards entering the search space
|
||||
// waiting until we are <= max_number
|
||||
if let Some(max_number) = maybe_max_number {
|
||||
loop {
|
||||
// TODO [snd] this should be a panic
|
||||
let current_header = self.backend.blockchain().header(BlockId::Hash(current_hash.clone()))?
|
||||
.ok_or_else(|| error::Error::from(format!("failed to get header for hash {}", current_hash)))?;
|
||||
|
||||
if current_header.number() <= &max_number {
|
||||
best_hash = current_header.hash();
|
||||
break;
|
||||
}
|
||||
|
||||
current_hash = *current_header.parent_hash();
|
||||
}
|
||||
}
|
||||
|
||||
// go backwards through the chain (via parent links)
|
||||
loop {
|
||||
// until we find target
|
||||
if current_hash == target_hash {
|
||||
return Ok(Some(best_hash));
|
||||
}
|
||||
|
||||
// TODO [snd] this should be a panic
|
||||
let current_header = self.backend.blockchain().header(BlockId::Hash(current_hash.clone()))?
|
||||
.ok_or_else(|| error::Error::from(format!("failed to get header for hash {}", current_hash)))?;
|
||||
|
||||
// stop search in this chain once we go below the target's block number
|
||||
if current_header.number() < target_header.number() {
|
||||
break;
|
||||
}
|
||||
|
||||
current_hash = *current_header.parent_hash();
|
||||
}
|
||||
}
|
||||
|
||||
unreachable!("this is a bug. `target_hash` is in blockchain but wasn't found following all leaves backwards");
|
||||
}
|
||||
}
|
||||
|
||||
impl<B, E, Block> CurrentHeight for Client<B, E, Block> where
|
||||
@@ -946,4 +1061,264 @@ mod tests {
|
||||
assert!(client.state_at(&BlockId::Number(1)).unwrap() != client.state_at(&BlockId::Number(0)).unwrap());
|
||||
assert_eq!(client.body(&BlockId::Number(1)).unwrap().unwrap().len(), 1)
|
||||
}
|
||||
|
||||
#[test]
fn best_containing_with_genesis_block() {
	// block tree: just the genesis block G.
	let client = test_client::new();

	let genesis_hash = client.info().unwrap().chain.genesis_hash;

	// the only chain is G itself, so G is the best block containing G.
	let best = client.best_containing(genesis_hash.clone(), None).unwrap().unwrap();
	assert_eq!(best, genesis_hash);
}
|
||||
|
||||
#[test]
fn best_containing_with_hash_not_found() {
	// block tree: just the genesis block G.
	let client = test_client::new();

	// bake a block but never import it, so its hash is unknown to the chain.
	let uninserted_block = client.new_block().unwrap().bake().unwrap();

	// searching for a hash that is not in the blockchain yields no result.
	assert_eq!(client.best_containing(uninserted_block.hash(), None).unwrap(), None);
}
|
||||
|
||||
#[test]
fn best_containing_with_single_chain_3_blocks() {
	// block tree:
	// G -> A1 -> A2
	let client = test_client::new();

	// import A1 on top of G.
	let a1 = client.new_block().unwrap().bake().unwrap();
	client.justify_and_import(BlockOrigin::Own, a1.clone()).unwrap();

	// import A2 on top of A1.
	let a2 = client.new_block().unwrap().bake().unwrap();
	client.justify_and_import(BlockOrigin::Own, a2.clone()).unwrap();

	let genesis_hash = client.info().unwrap().chain.genesis_hash;

	// A2 is the tip, so it is the best block containing every block in the chain.
	for target in [genesis_hash, a1.hash(), a2.hash()].iter() {
		assert_eq!(a2.hash(), client.best_containing(target.clone(), None).unwrap().unwrap());
	}
}
|
||||
|
||||
#[test]
fn best_containing_with_multiple_forks() {
	// NOTE: we use the version of the trait from `test_client`
	// because that is actually different than the version linked to
	// in the test facade crate.
	use test_client::blockchain::Backend as BlockchainBackendT;

	// block tree:
	// G -> A1 -> A2 -> A3 -> A4 -> A5
	// A1 -> B2 -> B3 -> B4
	// B2 -> C3
	// A1 -> D2
	let client = test_client::new();

	// G -> A1
	let a1 = client.new_block().unwrap().bake().unwrap();
	client.justify_and_import(BlockOrigin::Own, a1.clone()).unwrap();

	// A1 -> A2
	let a2 = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap().bake().unwrap();
	client.justify_and_import(BlockOrigin::Own, a2.clone()).unwrap();

	// A2 -> A3
	let a3 = client.new_block_at(&BlockId::Hash(a2.hash())).unwrap().bake().unwrap();
	client.justify_and_import(BlockOrigin::Own, a3.clone()).unwrap();

	// A3 -> A4
	let a4 = client.new_block_at(&BlockId::Hash(a3.hash())).unwrap().bake().unwrap();
	client.justify_and_import(BlockOrigin::Own, a4.clone()).unwrap();

	// A4 -> A5
	let a5 = client.new_block_at(&BlockId::Hash(a4.hash())).unwrap().bake().unwrap();
	client.justify_and_import(BlockOrigin::Own, a5.clone()).unwrap();

	// A1 -> B2
	let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap();
	// this push is required as otherwise B2 has the same hash as A2 and won't get imported
	builder.push_transfer(Transfer {
		from: Keyring::Alice.to_raw_public().into(),
		to: Keyring::Ferdie.to_raw_public().into(),
		amount: 41,
		nonce: 0,
	}).unwrap();
	let b2 = builder.bake().unwrap();
	client.justify_and_import(BlockOrigin::Own, b2.clone()).unwrap();

	// B2 -> B3
	let b3 = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap().bake().unwrap();
	client.justify_and_import(BlockOrigin::Own, b3.clone()).unwrap();

	// B3 -> B4
	let b4 = client.new_block_at(&BlockId::Hash(b3.hash())).unwrap().bake().unwrap();
	client.justify_and_import(BlockOrigin::Own, b4.clone()).unwrap();

	// B2 -> C3
	let mut builder = client.new_block_at(&BlockId::Hash(b2.hash())).unwrap();
	// this push is required as otherwise C3 has the same hash as B3 and won't get imported
	builder.push_transfer(Transfer {
		from: Keyring::Alice.to_raw_public().into(),
		to: Keyring::Ferdie.to_raw_public().into(),
		amount: 1,
		nonce: 1,
	}).unwrap();
	let c3 = builder.bake().unwrap();
	client.justify_and_import(BlockOrigin::Own, c3.clone()).unwrap();

	// A1 -> D2
	let mut builder = client.new_block_at(&BlockId::Hash(a1.hash())).unwrap();
	// this push is required as otherwise D2 has the same hash as B2 and won't get imported
	builder.push_transfer(Transfer {
		from: Keyring::Alice.to_raw_public().into(),
		to: Keyring::Ferdie.to_raw_public().into(),
		amount: 1,
		nonce: 0,
	}).unwrap();
	let d2 = builder.bake().unwrap();
	client.justify_and_import(BlockOrigin::Own, d2.clone()).unwrap();

	// A5 is the tip of the longest chain and therefore the best block.
	assert_eq!(client.info().unwrap().chain.best_hash, a5.hash());

	let genesis_hash = client.info().unwrap().chain.genesis_hash;
	let leaves = BlockchainBackendT::leaves(client.backend().blockchain()).unwrap();

	// exactly the four fork tips are leaves of the block tree.
	assert!(leaves.contains(&a5.hash()));
	assert!(leaves.contains(&b4.hash()));
	assert!(leaves.contains(&c3.hash()));
	assert!(leaves.contains(&d2.hash()));
	assert_eq!(leaves.len(), 4);

	// search without restriction

	assert_eq!(a5.hash(), client.best_containing(genesis_hash, None).unwrap().unwrap());
	assert_eq!(a5.hash(), client.best_containing(a1.hash(), None).unwrap().unwrap());
	assert_eq!(a5.hash(), client.best_containing(a2.hash(), None).unwrap().unwrap());
	assert_eq!(a5.hash(), client.best_containing(a3.hash(), None).unwrap().unwrap());
	assert_eq!(a5.hash(), client.best_containing(a4.hash(), None).unwrap().unwrap());
	assert_eq!(a5.hash(), client.best_containing(a5.hash(), None).unwrap().unwrap());

	assert_eq!(b4.hash(), client.best_containing(b2.hash(), None).unwrap().unwrap());
	assert_eq!(b4.hash(), client.best_containing(b3.hash(), None).unwrap().unwrap());
	assert_eq!(b4.hash(), client.best_containing(b4.hash(), None).unwrap().unwrap());

	assert_eq!(c3.hash(), client.best_containing(c3.hash(), None).unwrap().unwrap());

	assert_eq!(d2.hash(), client.best_containing(d2.hash(), None).unwrap().unwrap());


	// search only blocks with number <= 5. equivalent to without restriction for this scenario

	assert_eq!(a5.hash(), client.best_containing(genesis_hash, Some(5)).unwrap().unwrap());
	assert_eq!(a5.hash(), client.best_containing(a1.hash(), Some(5)).unwrap().unwrap());
	assert_eq!(a5.hash(), client.best_containing(a2.hash(), Some(5)).unwrap().unwrap());
	assert_eq!(a5.hash(), client.best_containing(a3.hash(), Some(5)).unwrap().unwrap());
	assert_eq!(a5.hash(), client.best_containing(a4.hash(), Some(5)).unwrap().unwrap());
	assert_eq!(a5.hash(), client.best_containing(a5.hash(), Some(5)).unwrap().unwrap());

	assert_eq!(b4.hash(), client.best_containing(b2.hash(), Some(5)).unwrap().unwrap());
	assert_eq!(b4.hash(), client.best_containing(b3.hash(), Some(5)).unwrap().unwrap());
	assert_eq!(b4.hash(), client.best_containing(b4.hash(), Some(5)).unwrap().unwrap());

	assert_eq!(c3.hash(), client.best_containing(c3.hash(), Some(5)).unwrap().unwrap());

	assert_eq!(d2.hash(), client.best_containing(d2.hash(), Some(5)).unwrap().unwrap());


	// search only blocks with number <= 4

	assert_eq!(a4.hash(), client.best_containing(genesis_hash, Some(4)).unwrap().unwrap());
	assert_eq!(a4.hash(), client.best_containing(a1.hash(), Some(4)).unwrap().unwrap());
	assert_eq!(a4.hash(), client.best_containing(a2.hash(), Some(4)).unwrap().unwrap());
	assert_eq!(a4.hash(), client.best_containing(a3.hash(), Some(4)).unwrap().unwrap());
	assert_eq!(a4.hash(), client.best_containing(a4.hash(), Some(4)).unwrap().unwrap());
	// A5 itself lies outside the restricted search space.
	assert_eq!(None, client.best_containing(a5.hash(), Some(4)).unwrap());

	assert_eq!(b4.hash(), client.best_containing(b2.hash(), Some(4)).unwrap().unwrap());
	assert_eq!(b4.hash(), client.best_containing(b3.hash(), Some(4)).unwrap().unwrap());
	assert_eq!(b4.hash(), client.best_containing(b4.hash(), Some(4)).unwrap().unwrap());

	assert_eq!(c3.hash(), client.best_containing(c3.hash(), Some(4)).unwrap().unwrap());

	assert_eq!(d2.hash(), client.best_containing(d2.hash(), Some(4)).unwrap().unwrap());


	// search only blocks with number <= 3

	assert_eq!(a3.hash(), client.best_containing(genesis_hash, Some(3)).unwrap().unwrap());
	assert_eq!(a3.hash(), client.best_containing(a1.hash(), Some(3)).unwrap().unwrap());
	assert_eq!(a3.hash(), client.best_containing(a2.hash(), Some(3)).unwrap().unwrap());
	assert_eq!(a3.hash(), client.best_containing(a3.hash(), Some(3)).unwrap().unwrap());
	assert_eq!(None, client.best_containing(a4.hash(), Some(3)).unwrap());
	assert_eq!(None, client.best_containing(a5.hash(), Some(3)).unwrap());

	assert_eq!(b3.hash(), client.best_containing(b2.hash(), Some(3)).unwrap().unwrap());
	assert_eq!(b3.hash(), client.best_containing(b3.hash(), Some(3)).unwrap().unwrap());
	assert_eq!(None, client.best_containing(b4.hash(), Some(3)).unwrap());

	assert_eq!(c3.hash(), client.best_containing(c3.hash(), Some(3)).unwrap().unwrap());

	assert_eq!(d2.hash(), client.best_containing(d2.hash(), Some(3)).unwrap().unwrap());


	// search only blocks with number <= 2

	assert_eq!(a2.hash(), client.best_containing(genesis_hash, Some(2)).unwrap().unwrap());
	assert_eq!(a2.hash(), client.best_containing(a1.hash(), Some(2)).unwrap().unwrap());
	assert_eq!(a2.hash(), client.best_containing(a2.hash(), Some(2)).unwrap().unwrap());
	assert_eq!(None, client.best_containing(a3.hash(), Some(2)).unwrap());
	assert_eq!(None, client.best_containing(a4.hash(), Some(2)).unwrap());
	assert_eq!(None, client.best_containing(a5.hash(), Some(2)).unwrap());

	assert_eq!(b2.hash(), client.best_containing(b2.hash(), Some(2)).unwrap().unwrap());
	assert_eq!(None, client.best_containing(b3.hash(), Some(2)).unwrap());
	assert_eq!(None, client.best_containing(b4.hash(), Some(2)).unwrap());

	assert_eq!(None, client.best_containing(c3.hash(), Some(2)).unwrap());

	assert_eq!(d2.hash(), client.best_containing(d2.hash(), Some(2)).unwrap().unwrap());


	// search only blocks with number <= 1

	assert_eq!(a1.hash(), client.best_containing(genesis_hash, Some(1)).unwrap().unwrap());
	assert_eq!(a1.hash(), client.best_containing(a1.hash(), Some(1)).unwrap().unwrap());
	assert_eq!(None, client.best_containing(a2.hash(), Some(1)).unwrap());
	assert_eq!(None, client.best_containing(a3.hash(), Some(1)).unwrap());
	assert_eq!(None, client.best_containing(a4.hash(), Some(1)).unwrap());
	assert_eq!(None, client.best_containing(a5.hash(), Some(1)).unwrap());

	assert_eq!(None, client.best_containing(b2.hash(), Some(1)).unwrap());
	assert_eq!(None, client.best_containing(b3.hash(), Some(1)).unwrap());
	assert_eq!(None, client.best_containing(b4.hash(), Some(1)).unwrap());

	assert_eq!(None, client.best_containing(c3.hash(), Some(1)).unwrap());

	assert_eq!(None, client.best_containing(d2.hash(), Some(1)).unwrap());

	// search only blocks with number <= 0

	assert_eq!(genesis_hash, client.best_containing(genesis_hash, Some(0)).unwrap().unwrap());
	assert_eq!(None, client.best_containing(a1.hash(), Some(0)).unwrap());
	assert_eq!(None, client.best_containing(a2.hash(), Some(0)).unwrap());
	assert_eq!(None, client.best_containing(a3.hash(), Some(0)).unwrap());
	assert_eq!(None, client.best_containing(a4.hash(), Some(0)).unwrap());
	assert_eq!(None, client.best_containing(a5.hash(), Some(0)).unwrap());

	assert_eq!(None, client.best_containing(b2.hash(), Some(0)).unwrap());
	assert_eq!(None, client.best_containing(b3.hash(), Some(0)).unwrap());
	assert_eq!(None, client.best_containing(b4.hash(), Some(0)).unwrap());

	assert_eq!(None, client.best_containing(c3.hash().clone(), Some(0)).unwrap());

	assert_eq!(None, client.best_containing(d2.hash().clone(), Some(0)).unwrap());
}
|
||||
}
|
||||
|
||||
@@ -32,6 +32,7 @@ use state_machine::backend::{Backend as StateBackend, InMemory};
|
||||
use state_machine::InMemoryChangesTrieStorage;
|
||||
use hash_db::Hasher;
|
||||
use heapsize::HeapSizeOf;
|
||||
use leaves::LeafSet;
|
||||
use trie::MemoryDB;
|
||||
|
||||
struct PendingBlock<B: BlockT> {
|
||||
@@ -69,7 +70,7 @@ impl<B: BlockT> StoredBlock<B> {
|
||||
fn extrinsics(&self) -> Option<&[B::Extrinsic]> {
|
||||
match *self {
|
||||
StoredBlock::Header(_, _) => None,
|
||||
StoredBlock::Full(ref b, _) => Some(b.extrinsics())
|
||||
StoredBlock::Full(ref b, _) => Some(b.extrinsics()),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -93,6 +94,7 @@ struct BlockchainStorage<Block: BlockT> {
|
||||
finalized_hash: Block::Hash,
|
||||
genesis_hash: Block::Hash,
|
||||
cht_roots: HashMap<NumberFor<Block>, Block::Hash>,
|
||||
leaves: LeafSet<Block::Hash, NumberFor<Block>>,
|
||||
}
|
||||
|
||||
/// In-memory blockchain. Supports concurrent reads.
|
||||
@@ -139,6 +141,7 @@ impl<Block: BlockT> Blockchain<Block> {
|
||||
finalized_hash: Default::default(),
|
||||
genesis_hash: Default::default(),
|
||||
cht_roots: HashMap::new(),
|
||||
leaves: LeafSet::new(),
|
||||
}));
|
||||
Blockchain {
|
||||
storage: storage.clone(),
|
||||
@@ -157,16 +160,50 @@ impl<Block: BlockT> Blockchain<Block> {
|
||||
justification: Option<Justification<Block::Hash>>,
|
||||
body: Option<Vec<<Block as BlockT>::Extrinsic>>,
|
||||
new_state: NewBlockState,
|
||||
) {
|
||||
) -> ::error::Result<()> {
|
||||
let number = header.number().clone();
|
||||
let best_tree_route = match new_state.is_best() {
|
||||
false => None,
|
||||
true => {
|
||||
let best_hash = self.storage.read().best_hash;
|
||||
if &best_hash == header.parent_hash() {
|
||||
None
|
||||
} else {
|
||||
let route = ::blockchain::tree_route(
|
||||
self,
|
||||
BlockId::Hash(best_hash),
|
||||
BlockId::Hash(*header.parent_hash()),
|
||||
)?;
|
||||
Some(route)
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let mut storage = self.storage.write();
|
||||
storage.blocks.insert(hash.clone(), StoredBlock::new(header, body, justification));
|
||||
storage.hashes.insert(number, hash.clone());
|
||||
|
||||
storage.leaves.import(hash.clone(), number.clone(), header.parent_hash().clone());
|
||||
|
||||
if new_state.is_best() {
|
||||
if let Some(tree_route) = best_tree_route {
|
||||
// apply retraction and enaction when reorganizing up to parent hash
|
||||
let enacted = tree_route.enacted();
|
||||
|
||||
for entry in enacted {
|
||||
storage.hashes.insert(entry.number, entry.hash);
|
||||
}
|
||||
|
||||
for entry in tree_route.retracted().iter().skip(enacted.len()) {
|
||||
storage.hashes.remove(&entry.number);
|
||||
}
|
||||
}
|
||||
|
||||
storage.best_hash = hash.clone();
|
||||
storage.best_number = number.clone();
|
||||
storage.hashes.insert(number, hash.clone());
|
||||
}
|
||||
|
||||
storage.blocks.insert(hash.clone(), StoredBlock::new(header, body, justification));
|
||||
|
||||
if let NewBlockState::Final = new_state {
|
||||
storage.finalized_hash = hash;
|
||||
}
|
||||
@@ -174,6 +211,8 @@ impl<Block: BlockT> Blockchain<Block> {
|
||||
if number == Zero::zero() {
|
||||
storage.genesis_hash = hash;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Compare this blockchain with another in-mem blockchain
|
||||
@@ -262,6 +301,10 @@ impl<Block: BlockT> blockchain::Backend<Block> for Blockchain<Block> {
|
||||
fn cache(&self) -> Option<&blockchain::Cache<Block>> {
|
||||
Some(&self.cache)
|
||||
}
|
||||
|
||||
/// Hashes of all current leaf blocks, delegated to the in-memory `LeafSet`.
fn leaves(&self) -> error::Result<Vec<Block::Hash>> {
	Ok(self.storage.read().leaves.hashes())
}
|
||||
}
|
||||
|
||||
impl<Block: BlockT> light::blockchain::Storage<Block> for Blockchain<Block>
|
||||
@@ -276,7 +319,7 @@ impl<Block: BlockT> light::blockchain::Storage<Block> for Blockchain<Block>
|
||||
) -> error::Result<()> {
|
||||
let hash = header.hash();
|
||||
let parent_hash = *header.parent_hash();
|
||||
self.insert(hash, header, None, None, state);
|
||||
self.insert(hash, header, None, None, state)?;
|
||||
if state.is_best() {
|
||||
self.cache.insert(parent_hash, authorities);
|
||||
}
|
||||
@@ -436,7 +479,7 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
self.blockchain.insert(hash, header, justification, body, pending_block.state);
|
||||
self.blockchain.insert(hash, header, justification, body, pending_block.state)?;
|
||||
// dumb implementation - store value for each block
|
||||
if pending_block.state.is_best() {
|
||||
self.blockchain.cache.insert(parent_hash, operation.pending_authorities);
|
||||
@@ -501,3 +544,26 @@ pub fn cache_authorities_at<Block: BlockT>(
|
||||
) {
|
||||
blockchain.cache.insert(at, authorities);
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
	use std::sync::Arc;
	use test_client;
	use primitives::Blake2Hasher;

	// The in-memory backend instantiated for the shared trait tests.
	type TestBackend = test_client::client::in_mem::Backend<test_client::runtime::Block, Blake2Hasher>;

	#[test]
	fn test_leaves_with_complex_block_tree() {
		let backend = Arc::new(TestBackend::new());

		// shared leaf-tracking test, reusable across backend implementations.
		test_client::trait_tests::test_leaves_for_backend(backend);
	}

	#[test]
	fn test_blockchain_query_by_number_gets_canonical() {
		let backend = Arc::new(TestBackend::new());

		// shared test: number-based queries must resolve to the canonical chain.
		test_client::trait_tests::test_blockchain_query_by_number_gets_canonical(backend);
	}
}
|
||||
|
||||
@@ -0,0 +1,204 @@
|
||||
// Copyright 2018 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Substrate is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Substrate is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
use std::collections::BTreeSet;
|
||||
use std::cmp::{Ord, Ordering};
|
||||
use kvdb::{KeyValueDB, DBTransaction};
|
||||
use runtime_primitives::traits::SimpleArithmetic;
|
||||
use codec::{Encode, Decode};
|
||||
use error;
|
||||
|
||||
/// helper wrapper type: a single entry of the leaf list, pairing a block
/// hash with its number. Entries are kept ordered by `number` descending
/// in a `BTreeSet` (see the comparison impls below), which allows faster
/// and simpler insertion and removal than keeping them in a list.
#[derive(Debug, Clone)]
struct LeafSetItem<H, N> {
	// hash of the leaf block
	hash: H,
	// number (height) of the leaf block
	number: N,
}
|
||||
|
||||
// NOTE(review): every comparison impl below looks only at `number` and
// ignores `hash`. As a consequence a `BTreeSet<LeafSetItem<_, _>>` treats
// two distinct blocks at the same height as equal — it can hold at most one
// leaf per block number, and `remove` matches whichever entry has that
// number. Confirm this is intended for forks whose tips share a height.
impl<H, N> Ord for LeafSetItem<H, N> where N: Ord {
	fn cmp(&self, other: &Self) -> Ordering {
		// reverse (descending) order: higher block numbers sort first
		other.number.cmp(&self.number)
	}
}

impl<H, N> PartialOrd for LeafSetItem<H, N> where N: PartialOrd {
	fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
		// reverse (descending) order, consistent with `Ord` above
		other.number.partial_cmp(&self.number)
	}
}

impl<H, N> PartialEq for LeafSetItem<H, N> where N: PartialEq {
	fn eq(&self, other: &LeafSetItem<H, N>) -> bool {
		// equality by number only; `hash` is deliberately not compared
		self.number == other.number
	}
}

impl<H, N> Eq for LeafSetItem<H, N> where N: PartialEq {}
|
||||
|
||||
/// A displaced leaf after import.
pub struct DisplacedLeaf<H, N> {
	// hash of the newly imported block that displaced the leaf
	new_hash: H,
	// the leaf entry (the new block's parent) that was removed from the set
	displaced: LeafSetItem<H, N>,
}
|
||||
|
||||
/// list of leaf hashes ordered by number (descending).
/// stored in memory for fast access.
/// this allows very fast checking and modification of active leaves.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct LeafSet<H, N> {
	// leaves ordered by block number descending (see `LeafSetItem`'s `Ord` impl)
	storage: BTreeSet<LeafSetItem<H, N>>,
}
|
||||
|
||||
impl<H, N> LeafSet<H, N> where
	H: Clone + Decode + Encode,
	N: Clone + SimpleArithmetic + Decode + Encode,
{
	/// Construct a new, blank leaf set.
	pub fn new() -> Self {
		Self {
			storage: BTreeSet::new()
		}
	}

	/// Read the leaf list from the DB, using given prefix for keys.
	///
	/// Each entry is keyed by `prefix ++ encode(hash)` with the encoded block
	/// number as its value (the layout `prepare_transaction` writes).
	/// Returns a `Backend` error if either part fails to decode.
	pub fn read_from_db(db: &KeyValueDB, column: Option<u32>, prefix: &[u8]) -> error::Result<Self> {
		let mut storage = BTreeSet::new();

		for (key, value) in db.iter_from_prefix(column, prefix) {
			// strip the prefix; the remainder of the key is the encoded hash.
			let raw_hash = &mut &key[prefix.len()..];
			let hash = match Decode::decode(raw_hash) {
				Some(hash) => hash,
				None => return Err(error::ErrorKind::Backend("Error decoding hash".into()).into()),
			};
			let number = match Decode::decode(&mut &value[..]) {
				Some(number) => number,
				None => return Err(error::ErrorKind::Backend("Error decoding number".into()).into()),
			};
			storage.insert(LeafSetItem { hash, number });
		}
		Ok(Self { storage })
	}

	/// update the leaf list on import. returns a displaced leaf if there was one.
	///
	/// The new block always becomes a leaf; if its parent was a leaf, the
	/// parent entry is removed and reported back as displaced so the caller
	/// can `undo` the operation if the import fails.
	pub fn import(&mut self, hash: H, number: N, parent_hash: H) -> Option<DisplacedLeaf<H, N>> {
		// avoid underflow for genesis.
		let displaced = if number != N::zero() {
			let displaced = LeafSetItem {
				hash: parent_hash,
				number: number.clone() - N::one(),
			};
			// NOTE(review): removal matches by number only (see LeafSetItem's
			// Eq), so this removes whichever single leaf sits at number - 1;
			// it is assumed to be the parent — confirm no sibling fork can
			// occupy that height.
			let was_displaced = self.storage.remove(&displaced);

			if was_displaced {
				Some(DisplacedLeaf {
					new_hash: hash.clone(),
					displaced,
				})
			} else {
				None
			}
		} else {
			None
		};

		self.storage.insert(LeafSetItem { hash, number });
		displaced
	}

	/// Undo an import operation, with a displaced leaf.
	///
	/// Removes the entry the import inserted (reconstructed from the
	/// displaced number + 1) and restores the displaced parent leaf.
	pub fn undo(&mut self, displaced: DisplacedLeaf<H, N>) {
		let new_number = displaced.displaced.number.clone() + N::one();
		self.storage.remove(&LeafSetItem { hash: displaced.new_hash, number: new_number });
		self.storage.insert(displaced.displaced);
	}

	/// currently since revert only affects the canonical chain
	/// we assume that parent has no further children
	/// and we add it as leaf again
	pub fn revert(&mut self, hash: H, number: N, parent_hash: H) {
		// re-add the parent as a leaf, then drop the reverted block's entry.
		self.storage.insert(LeafSetItem {
			hash: parent_hash,
			number: number.clone() - N::one(),
		});
		self.storage.remove(&LeafSetItem { hash, number });
	}

	/// returns a `Vec` of all hashes in the leaf set
	/// ordered by their block number descending.
	pub fn hashes(&self) -> Vec<H> {
		self.storage.iter().map(|item| item.hash.clone()).collect()
	}

	/// Write the leaf list to the database transaction.
	///
	/// Writes one `prefix ++ encode(hash) -> encode(number)` entry per leaf,
	/// the layout `read_from_db` expects.
	///
	/// NOTE(review): this only inserts/overwrites keys for the *current*
	/// leaves; keys of leaves displaced since the last flush are never
	/// deleted, so a later `read_from_db` may resurrect stale leaves —
	/// confirm callers clear the prefix range (or track removals) first.
	pub fn prepare_transaction(&self, tx: &mut DBTransaction, column: Option<u32>, prefix: &[u8]) {
		let mut buf = prefix.to_vec();
		for &LeafSetItem { ref hash, ref number } in &self.storage {
			hash.using_encoded(|s| buf.extend(s));
			tx.put_vec(column, &buf[..], number.encode());
			buf.truncate(prefix.len()); // reuse allocation.
		}
	}
}
|
||||
|
||||
#[cfg(test)]
mod tests {
	use super::*;

	// Hash literals below use Rust's underscore digit separator purely as a
	// mnemonic: `1_1` is the integer 11, read "fork 1, height 1"; `2_2` is 22,
	// "fork 2, height 2"; and so on.

	#[test]
	fn it_works() {
		let mut set = LeafSet::new();
		// genesis: number 0, parent ignored (no underflow path taken).
		set.import(0u32, 0u32, 0u32);

		// straight chain: genesis -> 1_1 -> 2_1 -> 3_1.
		set.import(1_1, 1, 0);
		set.import(2_1, 2, 1_1);
		set.import(3_1, 3, 2_1);

		// only the tip is a leaf; every ancestor was displaced in turn.
		assert!(set.storage.contains(&LeafSetItem { hash: 3_1, number: 3 }));
		assert!(!set.storage.contains(&LeafSetItem { hash: 2_1, number: 2 }));
		assert!(!set.storage.contains(&LeafSetItem { hash: 1_1, number: 1 }));
		assert!(!set.storage.contains(&LeafSetItem { hash: 0, number: 0 }));

		// fork off 1_1: 1_1 is no longer a leaf, so nothing is displaced.
		set.import(2_2, 2, 1_1);

		// both fork tips are now leaves.
		assert!(set.storage.contains(&LeafSetItem { hash: 3_1, number: 3 }));
		assert!(set.storage.contains(&LeafSetItem { hash: 2_2, number: 2 }));
	}

	#[test]
	fn flush_to_disk() {
		const PREFIX: &[u8] = b"abcdefg";
		// in-memory kvdb; 0 extra columns, entries go to the default column.
		let db = ::kvdb_memorydb::create(0);

		let mut set = LeafSet::new();
		set.import(0u32, 0u32, 0u32);

		// straight chain as in `it_works`.
		set.import(1_1, 1, 0);
		set.import(2_1, 2, 1_1);
		set.import(3_1, 3, 2_1);

		let mut tx = DBTransaction::new();

		// write the leaves out, then read them back into a fresh set.
		set.prepare_transaction(&mut tx, None, PREFIX);
		db.write(tx).unwrap();

		// round-trip must be lossless.
		let set2 = LeafSet::read_from_db(&db, None, PREFIX).unwrap();
		assert_eq!(set, set2);
	}
}
|
||||
@@ -40,11 +40,13 @@ extern crate hash_db;
|
||||
extern crate rlp;
|
||||
extern crate heapsize;
|
||||
extern crate memory_db;
|
||||
extern crate kvdb;
|
||||
|
||||
#[macro_use] extern crate error_chain;
|
||||
#[macro_use] extern crate log;
|
||||
#[cfg_attr(test, macro_use)] extern crate substrate_executor as executor;
|
||||
#[cfg(test)] #[macro_use] extern crate hex_literal;
|
||||
#[cfg(test)] extern crate kvdb_memorydb;
|
||||
|
||||
pub mod error;
|
||||
pub mod blockchain;
|
||||
@@ -54,6 +56,7 @@ pub mod in_mem;
|
||||
pub mod genesis;
|
||||
pub mod block_builder;
|
||||
pub mod light;
|
||||
mod leaves;
|
||||
mod call_executor;
|
||||
mod client;
|
||||
mod notifications;
|
||||
@@ -61,9 +64,11 @@ mod notifications;
|
||||
pub use blockchain::Info as ChainInfo;
|
||||
pub use call_executor::{CallResult, CallExecutor, LocalCallExecutor};
|
||||
pub use client::{
|
||||
new_with_backend,
|
||||
new_in_mem,
|
||||
BlockBody, BlockStatus, BlockOrigin, ImportNotifications, FinalityNotifications, BlockchainEvents,
|
||||
Client, ClientInfo, ChainHead, ImportResult, JustifiedHeader,
|
||||
};
|
||||
pub use notifications::{StorageEventStream, StorageChangeSet};
|
||||
pub use state_machine::ExecutionStrategy;
|
||||
pub use leaves::LeafSet;
|
||||
|
||||
@@ -150,4 +150,8 @@ impl<S, F, Block> BlockchainBackend<Block> for Blockchain<S, F> where Block: Blo
|
||||
fn cache(&self) -> Option<&BlockchainCache<Block>> {
|
||||
self.storage.cache()
|
||||
}
|
||||
|
||||
fn leaves(&self) -> ClientResult<Vec<Block::Hash>> {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user