mirror of
https://github.com/pezkuwichain/pezkuwi-subxt.git
synced 2026-05-01 01:57:56 +00:00
Move sc-client into sc-service (#5502)
* Drop client from sc-network and sc-client-db, move LongestChain to sc-client-api * move leaves, cht, in_mem to sc-client-api, drop client from sc-finality-grandpa * drop sc-service from sc-rpc * drop sc-service from sc-consensus-aura * drop sc-client from manual-seal and babe * drop sc-client from utils/frame/rpc/system and utils/frame/benchmarking-cli * drop sc-client from bin/node and bin/node-template * drop sc-client * fix tests * remove check -p sc-client from gitlab.yml * fix warnings * fixes ui test * fix light client tests * adds associated Client type to AbstractService * adds UsageProvider to Client * fixed ui test, again * tried and failed to get node-cli to compile for wasm * thanks to tomaka for helping me get node-cli to compile for wasmm * ui test pls pas 🙏🏾 * all tests passing 🪄 * no_run documentation code * rm -f documentation code * ClientProvider * fix mega trait * move LongestChain to sc-consensus, use adds minimal bounds to AbstractService::Client * adds license to sc-consensus Co-authored-by: Benjamin Kampmann <ben@parity.io>
This commit is contained in:
@@ -79,6 +79,25 @@ pub struct ClientImportOperation<Block: BlockT, B: Backend<Block>> {
|
||||
pub notify_finalized: Vec<Block::Hash>,
|
||||
}
|
||||
|
||||
/// Helper function to apply auxiliary data insertion into an operation.
///
/// Entries from `insert` are forwarded to the operation's `insert_aux` as
/// `(key, Some(value))` pairs, and keys from `delete` as `(key, None)`,
/// in a single pass.
pub fn apply_aux<'a, 'b: 'a, 'c: 'a, B, Block, D, I>(
	operation: &mut ClientImportOperation<Block, B>,
	insert: I,
	delete: D,
) -> sp_blockchain::Result<()>
	where
		Block: BlockT,
		B: Backend<Block>,
		I: IntoIterator<Item=&'a(&'c [u8], &'c [u8])>,
		D: IntoIterator<Item=&'a &'b [u8]>,
{
	operation.op.insert_aux(
		insert.into_iter()
			// Insertions become `(key, Some(value))`.
			.map(|(k, v)| (k.to_vec(), Some(v.to_vec())))
			// Deletions are encoded as `(key, None)`.
			.chain(delete.into_iter().map(|k| (k.to_vec(), None)))
	)
}
|
||||
|
||||
/// State of a new block.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum NewBlockState {
|
||||
|
||||
@@ -0,0 +1,466 @@
|
||||
// Copyright 2017-2020 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Substrate is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Substrate is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Canonical hash trie definitions and helper functions.
|
||||
//!
|
||||
//! Each CHT is a trie mapping block numbers to canonical hash.
|
||||
//! One is generated for every `SIZE` blocks, allowing us to discard those blocks in
|
||||
//! favor of the trie root. When the "ancient" blocks need to be accessed, we simply
|
||||
//! request an inclusion proof of a specific block number against the trie with the
|
||||
//! root has. A correct proof implies that the claimed block is identical to the one
|
||||
//! we discarded.
|
||||
|
||||
use hash_db;
|
||||
use codec::Encode;
|
||||
use sp_trie;
|
||||
|
||||
use sp_core::{H256, convert_hash};
|
||||
use sp_runtime::traits::{Header as HeaderT, AtLeast32Bit, Zero, One};
|
||||
use sp_state_machine::{
|
||||
MemoryDB, TrieBackend, Backend as StateBackend, StorageProof, InMemoryBackend,
|
||||
prove_read_on_trie_backend, read_proof_check, read_proof_check_on_proving_backend
|
||||
};
|
||||
|
||||
use sp_blockchain::{Error as ClientError, Result as ClientResult};
|
||||
|
||||
/// The size of each CHT. This value is passed to every CHT-related function from
|
||||
/// production code. Other values are passed from tests.
|
||||
const SIZE: u32 = 2048;
|
||||
|
||||
/// Gets default CHT size.
|
||||
pub fn size<N: From<u32>>() -> N {
|
||||
SIZE.into()
|
||||
}
|
||||
|
||||
/// Returns Some(cht_number) if CHT is need to be built when the block with given number is canonized.
|
||||
pub fn is_build_required<N>(cht_size: N, block_num: N) -> Option<N>
|
||||
where
|
||||
N: Clone + AtLeast32Bit,
|
||||
{
|
||||
let block_cht_num = block_to_cht_number(cht_size.clone(), block_num.clone())?;
|
||||
let two = N::one() + N::one();
|
||||
if block_cht_num < two {
|
||||
return None;
|
||||
}
|
||||
let cht_start = start_number(cht_size, block_cht_num.clone());
|
||||
if cht_start != block_num {
|
||||
return None;
|
||||
}
|
||||
|
||||
Some(block_cht_num - two)
|
||||
}
|
||||
|
||||
/// Returns Some(max_cht_number) if CHT has ever been built given maximal canonical block number.
|
||||
pub fn max_cht_number<N>(cht_size: N, max_canonical_block: N) -> Option<N>
|
||||
where
|
||||
N: Clone + AtLeast32Bit,
|
||||
{
|
||||
let max_cht_number = block_to_cht_number(cht_size, max_canonical_block)?;
|
||||
let two = N::one() + N::one();
|
||||
if max_cht_number < two {
|
||||
return None;
|
||||
}
|
||||
Some(max_cht_number - two)
|
||||
}
|
||||
|
||||
/// Compute a CHT root from an iterator of block hashes. Fails if shorter than
/// SIZE items. The items are assumed to proceed sequentially from `start_number(cht_num)`.
/// Discards the trie's nodes.
pub fn compute_root<Header, Hasher, I>(
	cht_size: Header::Number,
	cht_num: Header::Number,
	hashes: I,
) -> ClientResult<Hasher::Out>
	where
		Header: HeaderT,
		Hasher: hash_db::Hasher,
		Hasher::Out: Ord,
		I: IntoIterator<Item=ClientResult<Option<Header::Hash>>>,
{
	use sp_trie::TrieConfiguration;
	// Build the (encoded number -> hash) pairs for this CHT and reduce them
	// directly to a trie root, without keeping the trie nodes around.
	Ok(sp_trie::trie_types::Layout::<Hasher>::trie_root(
		build_pairs::<Header, I>(cht_size, cht_num, hashes)?
	))
}
|
||||
|
||||
/// Build CHT-based header proof.
///
/// Materializes the CHT identified by `cht_num` as an in-memory trie backend
/// (from `hashes`) and produces a storage read proof for the CHT keys of the
/// requested `blocks`.
pub fn build_proof<Header, Hasher, BlocksI, HashesI>(
	cht_size: Header::Number,
	cht_num: Header::Number,
	blocks: BlocksI,
	hashes: HashesI
) -> ClientResult<StorageProof>
	where
		Header: HeaderT,
		Hasher: hash_db::Hasher,
		Hasher::Out: Ord + codec::Codec,
		BlocksI: IntoIterator<Item=Header::Number>,
		HashesI: IntoIterator<Item=ClientResult<Option<Header::Hash>>>,
{
	// Turn the CHT pairs into a storage transaction (all insertions).
	let transaction = build_pairs::<Header, _>(cht_size, cht_num, hashes)?
		.into_iter()
		.map(|(k, v)| (k, Some(v)))
		.collect::<Vec<_>>();
	let mut storage = InMemoryBackend::<Hasher>::default().update(vec![(None, transaction)]);
	let trie_storage = storage.as_trie_backend()
		.expect("InMemoryState::as_trie_backend always returns Some; qed");
	// Prove a read of every requested block's CHT key against the trie.
	prove_read_on_trie_backend(
		trie_storage,
		blocks.into_iter().map(|number| encode_cht_key(number)),
	).map_err(ClientError::Execution)
}
|
||||
|
||||
/// Check CHT-based header proof.
|
||||
pub fn check_proof<Header, Hasher>(
|
||||
local_root: Header::Hash,
|
||||
local_number: Header::Number,
|
||||
remote_hash: Header::Hash,
|
||||
remote_proof: StorageProof,
|
||||
) -> ClientResult<()>
|
||||
where
|
||||
Header: HeaderT,
|
||||
Hasher: hash_db::Hasher,
|
||||
Hasher::Out: Ord + codec::Codec,
|
||||
{
|
||||
do_check_proof::<Header, Hasher, _>(
|
||||
local_root,
|
||||
local_number,
|
||||
remote_hash,
|
||||
move |local_root, local_cht_key|
|
||||
read_proof_check::<Hasher, _>(
|
||||
local_root,
|
||||
remote_proof,
|
||||
::std::iter::once(local_cht_key),
|
||||
)
|
||||
.map(|mut map| map
|
||||
.remove(local_cht_key)
|
||||
.expect("checked proof of local_cht_key; qed"))
|
||||
.map_err(|e| ClientError::from(e)),
|
||||
)
|
||||
}
|
||||
|
||||
/// Check CHT-based header proof on pre-created proving backend.
|
||||
pub fn check_proof_on_proving_backend<Header, Hasher>(
|
||||
local_root: Header::Hash,
|
||||
local_number: Header::Number,
|
||||
remote_hash: Header::Hash,
|
||||
proving_backend: &TrieBackend<MemoryDB<Hasher>, Hasher>,
|
||||
) -> ClientResult<()>
|
||||
where
|
||||
Header: HeaderT,
|
||||
Hasher: hash_db::Hasher,
|
||||
Hasher::Out: Ord + codec::Codec,
|
||||
{
|
||||
do_check_proof::<Header, Hasher, _>(
|
||||
local_root,
|
||||
local_number,
|
||||
remote_hash,
|
||||
|_, local_cht_key|
|
||||
read_proof_check_on_proving_backend::<Hasher>(
|
||||
proving_backend,
|
||||
local_cht_key,
|
||||
).map_err(|e| ClientError::from(e)),
|
||||
)
|
||||
}
|
||||
|
||||
/// Check CHT-based header proof using passed checker function.
|
||||
fn do_check_proof<Header, Hasher, F>(
|
||||
local_root: Header::Hash,
|
||||
local_number: Header::Number,
|
||||
remote_hash: Header::Hash,
|
||||
checker: F,
|
||||
) -> ClientResult<()>
|
||||
where
|
||||
Header: HeaderT,
|
||||
Hasher: hash_db::Hasher,
|
||||
Hasher::Out: Ord,
|
||||
F: FnOnce(Hasher::Out, &[u8]) -> ClientResult<Option<Vec<u8>>>,
|
||||
{
|
||||
let root: Hasher::Out = convert_hash(&local_root);
|
||||
let local_cht_key = encode_cht_key(local_number);
|
||||
let local_cht_value = checker(root, &local_cht_key)?;
|
||||
let local_cht_value = local_cht_value.ok_or_else(|| ClientError::InvalidCHTProof)?;
|
||||
let local_hash = decode_cht_value(&local_cht_value).ok_or_else(|| ClientError::InvalidCHTProof)?;
|
||||
match &local_hash[..] == remote_hash.as_ref() {
|
||||
true => Ok(()),
|
||||
false => Err(ClientError::InvalidCHTProof.into()),
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/// Group ordered blocks by CHT number and call functor with blocks of each group.
///
/// `blocks` must be ordered by CHT number (asserted below). `functor` is
/// threaded through `functor_param`, receiving the accumulated parameter, the
/// CHT number of the group, and the group's block numbers.
pub fn for_each_cht_group<Header, I, F, P>(
	cht_size: Header::Number,
	blocks: I,
	mut functor: F,
	mut functor_param: P,
) -> ClientResult<()>
	where
		Header: HeaderT,
		I: IntoIterator<Item=Header::Number>,
		F: FnMut(P, Header::Number, Vec<Header::Number>) -> ClientResult<P>,
{
	let mut current_cht_num = None;
	let mut current_cht_blocks = Vec::new();
	for block in blocks {
		// Block 0 (genesis) belongs to no CHT — that is an error here.
		let new_cht_num = match block_to_cht_number(cht_size, block) {
			Some(new_cht_num) => new_cht_num,
			None => return Err(ClientError::Backend(format!(
				"Cannot compute CHT root for the block #{}", block)).into()
			),
		};

		// Crossing a CHT boundary: flush the finished group before starting
		// the next one.
		let advance_to_next_cht = current_cht_num.is_some() && current_cht_num != Some(new_cht_num);
		if advance_to_next_cht {
			let current_cht_num = current_cht_num.expect("advance_to_next_cht is true;
				it is true only when current_cht_num is Some; qed");
			assert!(new_cht_num > current_cht_num, "for_each_cht_group only supports ordered iterators");

			functor_param = functor(
				functor_param,
				current_cht_num,
				::std::mem::replace(&mut current_cht_blocks, Vec::new()),
			)?;
		}

		current_cht_blocks.push(block);
		current_cht_num = Some(new_cht_num);
	}

	// Flush the last (still open) group, if any blocks were seen at all.
	if let Some(current_cht_num) = current_cht_num {
		functor(
			functor_param,
			current_cht_num,
			::std::mem::replace(&mut current_cht_blocks, Vec::new()),
		)?;
	}

	Ok(())
}
|
||||
|
||||
/// Build pairs for computing CHT.
|
||||
fn build_pairs<Header, I>(
|
||||
cht_size: Header::Number,
|
||||
cht_num: Header::Number,
|
||||
hashes: I
|
||||
) -> ClientResult<Vec<(Vec<u8>, Vec<u8>)>>
|
||||
where
|
||||
Header: HeaderT,
|
||||
I: IntoIterator<Item=ClientResult<Option<Header::Hash>>>,
|
||||
{
|
||||
let start_num = start_number(cht_size, cht_num);
|
||||
let mut pairs = Vec::new();
|
||||
let mut hash_index = Header::Number::zero();
|
||||
for hash in hashes.into_iter() {
|
||||
let hash = hash?.ok_or_else(|| ClientError::from(
|
||||
ClientError::MissingHashRequiredForCHT
|
||||
))?;
|
||||
pairs.push((
|
||||
encode_cht_key(start_num + hash_index).to_vec(),
|
||||
encode_cht_value(hash)
|
||||
));
|
||||
hash_index += Header::Number::one();
|
||||
if hash_index == cht_size {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if hash_index == cht_size {
|
||||
Ok(pairs)
|
||||
} else {
|
||||
Err(ClientError::MissingHashRequiredForCHT)
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the starting block of a given CHT.
|
||||
/// CHT 0 includes block 1...SIZE,
|
||||
/// CHT 1 includes block SIZE + 1 ... 2*SIZE
|
||||
/// More generally: CHT N includes block (1 + N*SIZE)...((N+1)*SIZE).
|
||||
/// This is because the genesis hash is assumed to be known
|
||||
/// and including it would be redundant.
|
||||
pub fn start_number<N: AtLeast32Bit>(cht_size: N, cht_num: N) -> N {
|
||||
(cht_num * cht_size) + N::one()
|
||||
}
|
||||
|
||||
/// Get the ending block of a given CHT.
|
||||
pub fn end_number<N: AtLeast32Bit>(cht_size: N, cht_num: N) -> N {
|
||||
(cht_num + N::one()) * cht_size
|
||||
}
|
||||
|
||||
/// Convert a block number to a CHT number.
|
||||
/// Returns `None` for `block_num` == 0, `Some` otherwise.
|
||||
pub fn block_to_cht_number<N: AtLeast32Bit>(cht_size: N, block_num: N) -> Option<N> {
|
||||
if block_num == N::zero() {
|
||||
None
|
||||
} else {
|
||||
Some((block_num - N::one()) / cht_size)
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert header number into CHT key.
|
||||
pub fn encode_cht_key<N: Encode>(number: N) -> Vec<u8> {
|
||||
number.encode()
|
||||
}
|
||||
|
||||
/// Convert header hash into CHT value.
///
/// The value is the raw bytes of the hash.
fn encode_cht_value<Hash: AsRef<[u8]>>(hash: Hash) -> Vec<u8> {
	Vec::from(hash.as_ref())
}
|
||||
|
||||
/// Convert CHT value into block header hash.
|
||||
pub fn decode_cht_value(value: &[u8]) -> Option<H256> {
|
||||
match value.len() {
|
||||
32 => Some(H256::from_slice(&value[0..32])),
|
||||
_ => None,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
	use super::*;
	use sp_runtime::{generic, traits::BlakeTwo256};

	type Header = generic::Header<u64, BlakeTwo256>;

	// CHT N is buildable exactly at the first block of CHT N+2.
	#[test]
	fn is_build_required_works() {
		assert_eq!(is_build_required(SIZE, 0u32.into()), None);
		assert_eq!(is_build_required(SIZE, 1u32.into()), None);
		assert_eq!(is_build_required(SIZE, SIZE), None);
		assert_eq!(is_build_required(SIZE, SIZE + 1), None);
		assert_eq!(is_build_required(SIZE, 2 * SIZE), None);
		assert_eq!(is_build_required(SIZE, 2 * SIZE + 1), Some(0));
		assert_eq!(is_build_required(SIZE, 2 * SIZE + 2), None);
		assert_eq!(is_build_required(SIZE, 3 * SIZE), None);
		assert_eq!(is_build_required(SIZE, 3 * SIZE + 1), Some(1));
		assert_eq!(is_build_required(SIZE, 3 * SIZE + 2), None);
	}

	// The newest built CHT lags two CHT numbers behind the max block's CHT.
	#[test]
	fn max_cht_number_works() {
		assert_eq!(max_cht_number(SIZE, 0u32.into()), None);
		assert_eq!(max_cht_number(SIZE, 1u32.into()), None);
		assert_eq!(max_cht_number(SIZE, SIZE), None);
		assert_eq!(max_cht_number(SIZE, SIZE + 1), None);
		assert_eq!(max_cht_number(SIZE, 2 * SIZE), None);
		assert_eq!(max_cht_number(SIZE, 2 * SIZE + 1), Some(0));
		assert_eq!(max_cht_number(SIZE, 2 * SIZE + 2), Some(0));
		assert_eq!(max_cht_number(SIZE, 3 * SIZE), Some(0));
		assert_eq!(max_cht_number(SIZE, 3 * SIZE + 1), Some(1));
		assert_eq!(max_cht_number(SIZE, 3 * SIZE + 2), Some(1));
	}

	// CHT N starts at N*SIZE + 1 (genesis excluded).
	#[test]
	fn start_number_works() {
		assert_eq!(start_number(SIZE, 0u32), 1u32);
		assert_eq!(start_number(SIZE, 1u32), SIZE + 1);
		assert_eq!(start_number(SIZE, 2u32), SIZE + SIZE + 1);
	}

	// CHT N ends at (N+1)*SIZE, inclusive.
	#[test]
	fn end_number_works() {
		assert_eq!(end_number(SIZE, 0u32), SIZE);
		assert_eq!(end_number(SIZE, 1u32), SIZE + SIZE);
		assert_eq!(end_number(SIZE, 2u32), SIZE + SIZE + SIZE);
	}

	// Fewer than SIZE hashes -> MissingHashRequiredForCHT.
	#[test]
	fn build_pairs_fails_when_no_enough_blocks() {
		assert!(build_pairs::<Header, _>(SIZE as _, 0,
			::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize / 2)).is_err());
	}

	// A single `Ok(None)` in the middle of the range fails the build.
	#[test]
	fn build_pairs_fails_when_missing_block() {
		assert!(build_pairs::<Header, _>(
			SIZE as _,
			0,
			::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1))))
				.take(SIZE as usize / 2)
				.chain(::std::iter::once(Ok(None)))
				.chain(::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(2))))
					.take(SIZE as usize / 2 - 1))
		).is_err());
	}

	#[test]
	fn compute_root_works() {
		assert!(compute_root::<Header, BlakeTwo256, _>(
			SIZE as _,
			42,
			::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1))))
				.take(SIZE as usize)
		).is_ok());
	}

	// Querying a block far outside the CHT range is expected to panic.
	#[test]
	#[should_panic]
	fn build_proof_panics_when_querying_wrong_block() {
		assert!(build_proof::<Header, BlakeTwo256, _, _>(
			SIZE as _,
			0,
			vec![(SIZE * 1000) as u64],
			::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1))))
				.take(SIZE as usize)
		).is_err());
	}

	#[test]
	fn build_proof_works() {
		assert!(build_proof::<Header, BlakeTwo256, _, _>(
			SIZE as _,
			0,
			vec![(SIZE / 2) as u64],
			::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1))))
				.take(SIZE as usize)
		).is_ok());
	}

	// Unordered input violates the documented precondition and panics.
	#[test]
	#[should_panic]
	fn for_each_cht_group_panics() {
		let cht_size = SIZE as u64;
		let _ = for_each_cht_group::<Header, _, _, _>(
			cht_size,
			vec![cht_size * 5, cht_size * 2],
			|_, _, _| Ok(()),
			(),
		);
	}

	// Blocks are grouped per CHT number and delivered in order.
	#[test]
	fn for_each_cht_group_works() {
		let cht_size = SIZE as u64;
		let _ = for_each_cht_group::<Header, _, _, _>(
			cht_size,
			vec![
				cht_size * 2 + 1, cht_size * 2 + 2, cht_size * 2 + 5,
				cht_size * 4 + 1, cht_size * 4 + 7,
				cht_size * 6 + 1
			], |_, cht_num, blocks| {
				match cht_num {
					2 => assert_eq!(blocks, vec![cht_size * 2 + 1, cht_size * 2 + 2, cht_size * 2 + 5]),
					4 => assert_eq!(blocks, vec![cht_size * 4 + 1, cht_size * 4 + 7]),
					6 => assert_eq!(blocks, vec![cht_size * 6 + 1]),
					_ => unreachable!(),
				}

				Ok(())
			}, ()
		);
	}
}
|
||||
@@ -0,0 +1,740 @@
|
||||
// Copyright 2017-2020 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Substrate is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Substrate is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! In memory client backend
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use parking_lot::RwLock;
|
||||
use sp_core::storage::well_known_keys;
|
||||
use sp_core::offchain::storage::{
|
||||
InMemOffchainStorage as OffchainStorage
|
||||
};
|
||||
use sp_runtime::generic::BlockId;
|
||||
use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero, NumberFor, HashFor};
|
||||
use sp_runtime::{Justification, Storage};
|
||||
use sp_state_machine::{
|
||||
ChangesTrieTransaction, InMemoryBackend, Backend as StateBackend, StorageCollection,
|
||||
ChildStorageCollection,
|
||||
};
|
||||
use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata};
|
||||
|
||||
use crate::{
|
||||
backend::{self, NewBlockState},
|
||||
blockchain::{
|
||||
self, BlockStatus, HeaderBackend, well_known_cache_keys::Id as CacheKeyId
|
||||
},
|
||||
UsageInfo,
|
||||
light,
|
||||
leaves::LeafSet,
|
||||
};
|
||||
|
||||
/// A block queued for import into the in-memory backend, paired with the
/// state (best/final/normal) it should be committed under.
struct PendingBlock<B: BlockT> {
	// Header (and optionally body/justification) of the pending block.
	block: StoredBlock<B>,
	// How the block should be treated once committed (e.g. best or final).
	state: NewBlockState,
}
|
||||
|
||||
#[derive(PartialEq, Eq, Clone)]
enum StoredBlock<B: BlockT> {
	/// Header-only block (no body), with an optional justification.
	Header(B::Header, Option<Justification>),
	/// Full block (header + extrinsics), with an optional justification.
	Full(B, Option<Justification>),
}
|
||||
|
||||
impl<B: BlockT> StoredBlock<B> {
	/// Build a stored block: `Full` when a body is supplied, `Header` otherwise.
	fn new(header: B::Header, body: Option<Vec<B::Extrinsic>>, just: Option<Justification>) -> Self {
		match body {
			Some(body) => StoredBlock::Full(B::new(header, body), just),
			None => StoredBlock::Header(header, just),
		}
	}

	/// Reference to the block's header, regardless of storage form.
	fn header(&self) -> &B::Header {
		match *self {
			StoredBlock::Header(ref h, _) => h,
			StoredBlock::Full(ref b, _) => b.header(),
		}
	}

	/// Justification attached to the block, if any.
	fn justification(&self) -> Option<&Justification> {
		match *self {
			StoredBlock::Header(_, ref j) | StoredBlock::Full(_, ref j) => j.as_ref()
		}
	}

	/// Extrinsics of the block; `None` when only the header is stored.
	fn extrinsics(&self) -> Option<&[B::Extrinsic]> {
		match *self {
			StoredBlock::Header(_, _) => None,
			StoredBlock::Full(ref b, _) => Some(b.extrinsics()),
		}
	}

	/// Decompose into `(header, optional body, optional justification)`.
	fn into_inner(self) -> (B::Header, Option<Vec<B::Extrinsic>>, Option<Justification>) {
		match self {
			StoredBlock::Header(header, just) => (header, None, just),
			StoredBlock::Full(block, just) => {
				let (header, body) = block.deconstruct();
				(header, Some(body), just)
			}
		}
	}
}
|
||||
|
||||
// All mutable state of the in-memory blockchain, kept behind a single lock.
#[derive(Clone)]
struct BlockchainStorage<Block: BlockT> {
	// Every known block, keyed by hash.
	blocks: HashMap<Block::Hash, StoredBlock<Block>>,
	// Canonical chain mapping: number -> hash.
	hashes: HashMap<NumberFor<Block>, Block::Hash>,
	// Current best block.
	best_hash: Block::Hash,
	best_number: NumberFor<Block>,
	// Latest finalized block.
	finalized_hash: Block::Hash,
	finalized_number: NumberFor<Block>,
	genesis_hash: Block::Hash,
	// CHT roots for headers / changes tries, keyed by block number.
	header_cht_roots: HashMap<NumberFor<Block>, Block::Hash>,
	changes_trie_cht_roots: HashMap<NumberFor<Block>, Block::Hash>,
	// Current chain tips.
	leaves: LeafSet<Block::Hash, NumberFor<Block>>,
	// Auxiliary key-value storage.
	aux: HashMap<Vec<u8>, Vec<u8>>,
}
|
||||
|
||||
/// In-memory blockchain. Supports concurrent reads.
pub struct Blockchain<Block: BlockT> {
	// All chain data behind a shared read-write lock.
	storage: Arc<RwLock<BlockchainStorage<Block>>>,
}
|
||||
|
||||
impl<Block: BlockT + Clone> Clone for Blockchain<Block> {
|
||||
fn clone(&self) -> Self {
|
||||
let storage = Arc::new(RwLock::new(self.storage.read().clone()));
|
||||
Blockchain {
|
||||
storage: storage.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<Block: BlockT> Blockchain<Block> {
	/// Get header hash of given block.
	pub fn id(&self, id: BlockId<Block>) -> Option<Block::Hash> {
		match id {
			BlockId::Hash(h) => Some(h),
			// Numbers resolve through the canonical number -> hash mapping.
			BlockId::Number(n) => self.storage.read().hashes.get(&n).cloned(),
		}
	}

	/// Create new in-memory blockchain storage.
	pub fn new() -> Blockchain<Block> {
		let storage = Arc::new(RwLock::new(
			BlockchainStorage {
				blocks: HashMap::new(),
				hashes: HashMap::new(),
				best_hash: Default::default(),
				best_number: Zero::zero(),
				finalized_hash: Default::default(),
				finalized_number: Zero::zero(),
				genesis_hash: Default::default(),
				header_cht_roots: HashMap::new(),
				changes_trie_cht_roots: HashMap::new(),
				leaves: LeafSet::new(),
				aux: HashMap::new(),
			}));
		Blockchain {
			storage: storage.clone(),
		}
	}

	/// Insert a block header and associated data.
	///
	/// Updates the canonical mapping when `new_state` marks the block as
	/// best, and the finalized/genesis bookkeeping as appropriate.
	pub fn insert(
		&self,
		hash: Block::Hash,
		header: <Block as BlockT>::Header,
		justification: Option<Justification>,
		body: Option<Vec<<Block as BlockT>::Extrinsic>>,
		new_state: NewBlockState,
	) -> sp_blockchain::Result<()> {
		let number = header.number().clone();
		if new_state.is_best() {
			// May re-organize the canonical mapping; must run before we take
			// the write lock below (apply_head locks storage itself).
			self.apply_head(&header)?;
		}

		{
			let mut storage = self.storage.write();
			storage.leaves.import(hash.clone(), number.clone(), header.parent_hash().clone());
			storage.blocks.insert(hash.clone(), StoredBlock::new(header, body, justification));

			if let NewBlockState::Final = new_state {
				storage.finalized_hash = hash;
				storage.finalized_number = number.clone();
			}

			// Block number zero is the genesis block.
			if number == Zero::zero() {
				storage.genesis_hash = hash;
			}
		}

		Ok(())
	}

	/// Get total number of blocks.
	pub fn blocks_count(&self) -> usize {
		self.storage.read().blocks.len()
	}

	/// Compare this blockchain with another in-mem blockchain
	pub fn equals_to(&self, other: &Self) -> bool {
		// Canonical mapping AND full block contents must match.
		self.canon_equals_to(other) && self.storage.read().blocks == other.storage.read().blocks
	}

	/// Compare canonical chain to other canonical chain.
	pub fn canon_equals_to(&self, other: &Self) -> bool {
		let this = self.storage.read();
		let other = other.storage.read();
		this.hashes == other.hashes
			&& this.best_hash == other.best_hash
			&& this.best_number == other.best_number
			&& this.genesis_hash == other.genesis_hash
	}

	/// Insert header CHT root.
	pub fn insert_cht_root(&self, block: NumberFor<Block>, cht_root: Block::Hash) {
		self.storage.write().header_cht_roots.insert(block, cht_root);
	}

	/// Set an existing block as head.
	pub fn set_head(&self, id: BlockId<Block>) -> sp_blockchain::Result<()> {
		let header = match self.header(id)? {
			Some(h) => h,
			None => return Err(sp_blockchain::Error::UnknownBlock(format!("{}", id))),
		};

		self.apply_head(&header)
	}

	// Make `header` the new best block, re-organizing the canonical
	// number -> hash mapping along the tree route when necessary.
	fn apply_head(&self, header: &<Block as BlockT>::Header) -> sp_blockchain::Result<()> {
		let hash = header.hash();
		let number = header.number();

		// Note: this may lock storage, so it must happen before obtaining storage
		// write lock.
		let best_tree_route = {
			let best_hash = self.storage.read().best_hash;
			if &best_hash == header.parent_hash() {
				// Direct child of the current best: no re-org needed.
				None
			} else {
				let route = sp_blockchain::tree_route(self, best_hash, *header.parent_hash())?;
				Some(route)
			}
		};

		let mut storage = self.storage.write();

		if let Some(tree_route) = best_tree_route {
			// apply retraction and enaction when reorganizing up to parent hash
			let enacted = tree_route.enacted();

			for entry in enacted {
				storage.hashes.insert(entry.number, entry.hash);
			}

			// Retracted numbers not overwritten by an enacted entry are removed.
			for entry in tree_route.retracted().iter().skip(enacted.len()) {
				storage.hashes.remove(&entry.number);
			}
		}

		storage.best_hash = hash.clone();
		storage.best_number = number.clone();
		storage.hashes.insert(number.clone(), hash.clone());

		Ok(())
	}

	// Mark the block identified by `id` as finalized, optionally replacing
	// its stored justification.
	fn finalize_header(&self, id: BlockId<Block>, justification: Option<Justification>) -> sp_blockchain::Result<()> {
		let hash = match self.header(id)? {
			Some(h) => h.hash(),
			None => return Err(sp_blockchain::Error::UnknownBlock(format!("{}", id))),
		};

		let mut storage = self.storage.write();
		storage.finalized_hash = hash;

		if justification.is_some() {
			let block = storage.blocks.get_mut(&hash)
				.expect("hash was fetched from a block in the db; qed");

			let block_justification = match block {
				StoredBlock::Header(_, ref mut j) | StoredBlock::Full(_, ref mut j) => j
			};

			*block_justification = justification;
		}

		Ok(())
	}

	// Apply a batch of auxiliary key-value operations:
	// `Some(v)` inserts/overwrites, `None` deletes.
	fn write_aux(&self, ops: Vec<(Vec<u8>, Option<Vec<u8>>)>) {
		let mut storage = self.storage.write();
		for (k, v) in ops {
			match v {
				Some(v) => storage.aux.insert(k, v),
				None => storage.aux.remove(&k),
			};
		}
	}
}
|
||||
|
||||
impl<Block: BlockT> HeaderBackend<Block> for Blockchain<Block> {
	/// Look up a header by block id (hash or number).
	fn header(&self, id: BlockId<Block>) -> sp_blockchain::Result<Option<<Block as BlockT>::Header>> {
		Ok(self.id(id).and_then(|hash| {
			self.storage.read().blocks.get(&hash).map(|b| b.header().clone())
		}))
	}

	/// Snapshot of chain state (best/genesis/finalized blocks, leaf count).
	fn info(&self) -> blockchain::Info<Block> {
		let storage = self.storage.read();
		blockchain::Info {
			best_hash: storage.best_hash,
			best_number: storage.best_number,
			genesis_hash: storage.genesis_hash,
			finalized_hash: storage.finalized_hash,
			finalized_number: storage.finalized_number,
			number_leaves: storage.leaves.count()
		}
	}

	/// `InChain` if the block is stored, `Unknown` otherwise.
	fn status(&self, id: BlockId<Block>) -> sp_blockchain::Result<BlockStatus> {
		match self.id(id).map_or(false, |hash| self.storage.read().blocks.contains_key(&hash)) {
			true => Ok(BlockStatus::InChain),
			false => Ok(BlockStatus::Unknown),
		}
	}

	/// Number of the block with the given hash, if known.
	fn number(&self, hash: Block::Hash) -> sp_blockchain::Result<Option<NumberFor<Block>>> {
		Ok(self.storage.read().blocks.get(&hash).map(|b| *b.header().number()))
	}

	/// Canonical hash for the given number, if known.
	fn hash(&self, number: <<Block as BlockT>::Header as HeaderT>::Number) -> sp_blockchain::Result<Option<Block::Hash>> {
		Ok(self.id(BlockId::Number(number)))
	}
}
|
||||
|
||||
impl<Block: BlockT> HeaderMetadata<Block> for Blockchain<Block> {
	type Error = sp_blockchain::Error;

	/// Builds metadata directly from the stored header on each call; there
	/// is no metadata cache for the in-memory backend.
	fn header_metadata(&self, hash: Block::Hash) -> Result<CachedHeaderMetadata<Block>, Self::Error> {
		self.header(BlockId::hash(hash))?.map(|header| CachedHeaderMetadata::from(&header))
			.ok_or(sp_blockchain::Error::UnknownBlock(format!("header not found: {}", hash)))
	}

	fn insert_header_metadata(&self, _hash: Block::Hash, _metadata: CachedHeaderMetadata<Block>) {
		// No need to implement.
	}
	fn remove_header_metadata(&self, _hash: Block::Hash) {
		// No need to implement.
	}
}
|
||||
|
||||
impl<Block: BlockT> blockchain::Backend<Block> for Blockchain<Block> {
	/// Extrinsics of the block; `None` when unknown or header-only.
	fn body(&self, id: BlockId<Block>) -> sp_blockchain::Result<Option<Vec<<Block as BlockT>::Extrinsic>>> {
		Ok(self.id(id).and_then(|hash| {
			self.storage.read().blocks.get(&hash)
				.and_then(|b| b.extrinsics().map(|x| x.to_vec()))
		}))
	}

	/// Justification of the block, if one is stored.
	fn justification(&self, id: BlockId<Block>) -> sp_blockchain::Result<Option<Justification>> {
		Ok(self.id(id).and_then(|hash| self.storage.read().blocks.get(&hash).and_then(|b|
			b.justification().map(|x| x.clone()))
		))
	}

	fn last_finalized(&self) -> sp_blockchain::Result<Block::Hash> {
		Ok(self.storage.read().finalized_hash.clone())
	}

	// The in-memory backend provides no blockchain cache.
	fn cache(&self) -> Option<Arc<dyn blockchain::Cache<Block>>> {
		None
	}

	fn leaves(&self) -> sp_blockchain::Result<Vec<Block::Hash>> {
		Ok(self.storage.read().leaves.hashes())
	}

	// Child lookup is not supported by this backend.
	fn children(&self, _parent_hash: Block::Hash) -> sp_blockchain::Result<Vec<Block::Hash>> {
		unimplemented!()
	}
}
|
||||
|
||||
impl<Block: BlockT> blockchain::ProvideCache<Block> for Blockchain<Block> {
	// The in-memory backend provides no blockchain cache.
	fn cache(&self) -> Option<Arc<dyn blockchain::Cache<Block>>> {
		None
	}
}
|
||||
|
||||
impl<Block: BlockT> backend::AuxStore for Blockchain<Block> {
	/// Apply auxiliary insertions and deletions under a single write lock.
	fn insert_aux<
		'a,
		'b: 'a,
		'c: 'a,
		I: IntoIterator<Item=&'a(&'c [u8], &'c [u8])>,
		D: IntoIterator<Item=&'a &'b [u8]>,
	>(&self, insert: I, delete: D) -> sp_blockchain::Result<()> {
		let mut storage = self.storage.write();
		for (k, v) in insert {
			storage.aux.insert(k.to_vec(), v.to_vec());
		}
		for k in delete {
			storage.aux.remove(*k);
		}
		Ok(())
	}

	/// Read an auxiliary value by key.
	fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result<Option<Vec<u8>>> {
		Ok(self.storage.read().aux.get(key).cloned())
	}
}
|
||||
|
||||
impl<Block: BlockT> light::Storage<Block> for Blockchain<Block>
|
||||
where
|
||||
Block::Hash: From<[u8; 32]>,
|
||||
{
|
||||
fn import_header(
|
||||
&self,
|
||||
header: Block::Header,
|
||||
_cache: HashMap<CacheKeyId, Vec<u8>>,
|
||||
state: NewBlockState,
|
||||
aux_ops: Vec<(Vec<u8>, Option<Vec<u8>>)>,
|
||||
) -> sp_blockchain::Result<()> {
|
||||
let hash = header.hash();
|
||||
self.insert(hash, header, None, None, state)?;
|
||||
|
||||
self.write_aux(aux_ops);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn set_head(&self, id: BlockId<Block>) -> sp_blockchain::Result<()> {
|
||||
Blockchain::set_head(self, id)
|
||||
}
|
||||
|
||||
fn last_finalized(&self) -> sp_blockchain::Result<Block::Hash> {
|
||||
Ok(self.storage.read().finalized_hash.clone())
|
||||
}
|
||||
|
||||
fn finalize_header(&self, id: BlockId<Block>) -> sp_blockchain::Result<()> {
|
||||
Blockchain::finalize_header(self, id, None)
|
||||
}
|
||||
|
||||
fn header_cht_root(
|
||||
&self,
|
||||
_cht_size: NumberFor<Block>,
|
||||
block: NumberFor<Block>,
|
||||
) -> sp_blockchain::Result<Option<Block::Hash>> {
|
||||
self.storage.read().header_cht_roots.get(&block).cloned()
|
||||
.ok_or_else(|| sp_blockchain::Error::Backend(format!("Header CHT for block {} not exists", block)))
|
||||
.map(Some)
|
||||
}
|
||||
|
||||
fn changes_trie_cht_root(
|
||||
&self,
|
||||
_cht_size: NumberFor<Block>,
|
||||
block: NumberFor<Block>,
|
||||
) -> sp_blockchain::Result<Option<Block::Hash>> {
|
||||
self.storage.read().changes_trie_cht_roots.get(&block).cloned()
|
||||
.ok_or_else(|| sp_blockchain::Error::Backend(format!("Changes trie CHT for block {} not exists", block)))
|
||||
.map(Some)
|
||||
}
|
||||
|
||||
fn cache(&self) -> Option<Arc<dyn blockchain::Cache<Block>>> {
|
||||
None
|
||||
}
|
||||
|
||||
fn usage_info(&self) -> Option<UsageInfo> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// In-memory operation.
pub struct BlockImportOperation<Block: BlockT> {
	// Block queued by `set_block_data`; at most one per operation (asserted there).
	pending_block: Option<PendingBlock<Block>>,
	// Cache-key updates supplied via `update_cache`.
	pending_cache: HashMap<CacheKeyId, Vec<u8>>,
	// State of the block this operation builds on.
	old_state: InMemoryBackend<HashFor<Block>>,
	// State transaction to apply on commit, set by `update_db_storage`/`reset_storage`.
	new_state: Option<<InMemoryBackend<HashFor<Block>> as StateBackend<HashFor<Block>>>::Transaction>,
	// Auxiliary key/value writes; a `None` value marks a deletion.
	aux: Vec<(Vec<u8>, Option<Vec<u8>>)>,
	// Blocks to mark finalized on commit, each with an optional justification.
	finalized_blocks: Vec<(BlockId<Block>, Option<Justification>)>,
	// New head to set on commit, if requested via `mark_head`.
	set_head: Option<BlockId<Block>>,
}
|
||||
|
||||
impl<Block: BlockT> backend::BlockImportOperation<Block> for BlockImportOperation<Block> where
|
||||
Block::Hash: Ord,
|
||||
{
|
||||
type State = InMemoryBackend<HashFor<Block>>;
|
||||
|
||||
fn state(&self) -> sp_blockchain::Result<Option<&Self::State>> {
|
||||
Ok(Some(&self.old_state))
|
||||
}
|
||||
|
||||
fn set_block_data(
|
||||
&mut self,
|
||||
header: <Block as BlockT>::Header,
|
||||
body: Option<Vec<<Block as BlockT>::Extrinsic>>,
|
||||
justification: Option<Justification>,
|
||||
state: NewBlockState,
|
||||
) -> sp_blockchain::Result<()> {
|
||||
assert!(self.pending_block.is_none(), "Only one block per operation is allowed");
|
||||
self.pending_block = Some(PendingBlock {
|
||||
block: StoredBlock::new(header, body, justification),
|
||||
state,
|
||||
});
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn update_cache(&mut self, cache: HashMap<CacheKeyId, Vec<u8>>) {
|
||||
self.pending_cache = cache;
|
||||
}
|
||||
|
||||
fn update_db_storage(
|
||||
&mut self,
|
||||
update: <InMemoryBackend<HashFor<Block>> as StateBackend<HashFor<Block>>>::Transaction,
|
||||
) -> sp_blockchain::Result<()> {
|
||||
self.new_state = Some(update);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn update_changes_trie(
|
||||
&mut self,
|
||||
_update: ChangesTrieTransaction<HashFor<Block>, NumberFor<Block>>,
|
||||
) -> sp_blockchain::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn reset_storage(&mut self, storage: Storage) -> sp_blockchain::Result<Block::Hash> {
|
||||
check_genesis_storage(&storage)?;
|
||||
|
||||
let child_delta = storage.children_default.into_iter()
|
||||
.map(|(_storage_key, child_content)|
|
||||
(child_content.child_info, child_content.data.into_iter().map(|(k, v)| (k, Some(v)))));
|
||||
|
||||
let (root, transaction) = self.old_state.full_storage_root(
|
||||
storage.top.into_iter().map(|(k, v)| (k, Some(v))),
|
||||
child_delta
|
||||
);
|
||||
|
||||
self.new_state = Some(transaction);
|
||||
Ok(root)
|
||||
}
|
||||
|
||||
fn insert_aux<I>(&mut self, ops: I) -> sp_blockchain::Result<()>
|
||||
where I: IntoIterator<Item=(Vec<u8>, Option<Vec<u8>>)>
|
||||
{
|
||||
self.aux.append(&mut ops.into_iter().collect());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn update_storage(
|
||||
&mut self,
|
||||
_update: StorageCollection,
|
||||
_child_update: ChildStorageCollection,
|
||||
) -> sp_blockchain::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn mark_finalized(
|
||||
&mut self,
|
||||
block: BlockId<Block>,
|
||||
justification: Option<Justification>,
|
||||
) -> sp_blockchain::Result<()> {
|
||||
self.finalized_blocks.push((block, justification));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn mark_head(&mut self, block: BlockId<Block>) -> sp_blockchain::Result<()> {
|
||||
assert!(self.pending_block.is_none(), "Only one set block per operation is allowed");
|
||||
self.set_head = Some(block);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// In-memory backend. Keeps all states and blocks in memory.
///
/// > **Warning**: Doesn't support all the features necessary for a proper database. Only use this
/// > struct for testing purposes. Do **NOT** use in production.
pub struct Backend<Block: BlockT> where Block::Hash: Ord {
	// One full state backend per imported block, keyed by block hash.
	states: RwLock<HashMap<Block::Hash, InMemoryBackend<HashFor<Block>>>>,
	// Headers, bodies, justifications and chain metadata.
	blockchain: Blockchain<Block>,
	// Lock handed out via `get_import_lock` to serialize block imports.
	import_lock: RwLock<()>,
}
|
||||
|
||||
impl<Block: BlockT> Backend<Block> where Block::Hash: Ord {
|
||||
/// Create a new instance of in-mem backend.
|
||||
pub fn new() -> Self {
|
||||
Backend {
|
||||
states: RwLock::new(HashMap::new()),
|
||||
blockchain: Blockchain::new(),
|
||||
import_lock: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Auxiliary storage is delegated wholesale to the inner `Blockchain`.
impl<Block: BlockT> backend::AuxStore for Backend<Block> where Block::Hash: Ord {
	fn insert_aux<
		'a,
		'b: 'a,
		'c: 'a,
		I: IntoIterator<Item=&'a(&'c [u8], &'c [u8])>,
		D: IntoIterator<Item=&'a &'b [u8]>,
	>(&self, insert: I, delete: D) -> sp_blockchain::Result<()> {
		// Forward to the blockchain's aux map.
		self.blockchain.insert_aux(insert, delete)
	}

	fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result<Option<Vec<u8>>> {
		// Forward to the blockchain's aux map.
		self.blockchain.get_aux(key)
	}
}
|
||||
|
||||
impl<Block: BlockT> backend::Backend<Block> for Backend<Block> where Block::Hash: Ord {
|
||||
type BlockImportOperation = BlockImportOperation<Block>;
|
||||
type Blockchain = Blockchain<Block>;
|
||||
type State = InMemoryBackend<HashFor<Block>>;
|
||||
type OffchainStorage = OffchainStorage;
|
||||
|
||||
fn begin_operation(&self) -> sp_blockchain::Result<Self::BlockImportOperation> {
|
||||
let old_state = self.state_at(BlockId::Hash(Default::default()))?;
|
||||
Ok(BlockImportOperation {
|
||||
pending_block: None,
|
||||
pending_cache: Default::default(),
|
||||
old_state,
|
||||
new_state: None,
|
||||
aux: Default::default(),
|
||||
finalized_blocks: Default::default(),
|
||||
set_head: None,
|
||||
})
|
||||
}
|
||||
|
||||
fn begin_state_operation(
|
||||
&self,
|
||||
operation: &mut Self::BlockImportOperation,
|
||||
block: BlockId<Block>,
|
||||
) -> sp_blockchain::Result<()> {
|
||||
operation.old_state = self.state_at(block)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn commit_operation(&self, operation: Self::BlockImportOperation) -> sp_blockchain::Result<()> {
|
||||
if !operation.finalized_blocks.is_empty() {
|
||||
for (block, justification) in operation.finalized_blocks {
|
||||
self.blockchain.finalize_header(block, justification)?;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(pending_block) = operation.pending_block {
|
||||
let old_state = &operation.old_state;
|
||||
let (header, body, justification) = pending_block.block.into_inner();
|
||||
|
||||
let hash = header.hash();
|
||||
|
||||
let new_state = match operation.new_state {
|
||||
Some(state) => old_state.update_backend(*header.state_root(), state),
|
||||
None => old_state.clone(),
|
||||
};
|
||||
|
||||
self.states.write().insert(hash, new_state);
|
||||
|
||||
self.blockchain.insert(hash, header, justification, body, pending_block.state)?;
|
||||
}
|
||||
|
||||
if !operation.aux.is_empty() {
|
||||
self.blockchain.write_aux(operation.aux);
|
||||
}
|
||||
|
||||
if let Some(set_head) = operation.set_head {
|
||||
self.blockchain.set_head(set_head)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn finalize_block(
|
||||
&self,
|
||||
block: BlockId<Block>,
|
||||
justification: Option<Justification>,
|
||||
) -> sp_blockchain::Result<()> {
|
||||
self.blockchain.finalize_header(block, justification)
|
||||
}
|
||||
|
||||
fn blockchain(&self) -> &Self::Blockchain {
|
||||
&self.blockchain
|
||||
}
|
||||
|
||||
fn usage_info(&self) -> Option<UsageInfo> {
|
||||
None
|
||||
}
|
||||
|
||||
fn changes_trie_storage(&self) -> Option<&dyn backend::PrunableStateChangesTrieStorage<Block>> {
|
||||
None
|
||||
}
|
||||
|
||||
fn offchain_storage(&self) -> Option<Self::OffchainStorage> {
|
||||
None
|
||||
}
|
||||
|
||||
fn state_at(&self, block: BlockId<Block>) -> sp_blockchain::Result<Self::State> {
|
||||
match block {
|
||||
BlockId::Hash(h) if h == Default::default() => {
|
||||
return Ok(Self::State::default());
|
||||
},
|
||||
_ => {},
|
||||
}
|
||||
|
||||
match self.blockchain.id(block).and_then(|id| self.states.read().get(&id).cloned()) {
|
||||
Some(state) => Ok(state),
|
||||
None => Err(sp_blockchain::Error::UnknownBlock(format!("{}", block))),
|
||||
}
|
||||
}
|
||||
|
||||
fn revert(
|
||||
&self,
|
||||
_n: NumberFor<Block>,
|
||||
_revert_finalized: bool,
|
||||
) -> sp_blockchain::Result<NumberFor<Block>> {
|
||||
Ok(Zero::zero())
|
||||
}
|
||||
|
||||
fn get_import_lock(&self) -> &RwLock<()> {
|
||||
&self.import_lock
|
||||
}
|
||||
}
|
||||
|
||||
// Empty marker impl: the in-memory backend satisfies the full local-backend contract.
impl<Block: BlockT> backend::LocalBackend<Block> for Backend<Block> where Block::Hash: Ord {}
|
||||
|
||||
impl<Block: BlockT> backend::RemoteBackend<Block> for Backend<Block> where Block::Hash: Ord {
|
||||
fn is_local_state_available(&self, block: &BlockId<Block>) -> bool {
|
||||
self.blockchain.expect_block_number_from_id(block)
|
||||
.map(|num| num.is_zero())
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
fn remote_blockchain(&self) -> Arc<dyn light::RemoteBlockchain<Block>> {
|
||||
unimplemented!()
|
||||
}
|
||||
}
|
||||
|
||||
/// Check that genesis storage is valid.
|
||||
pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> {
|
||||
if storage.top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) {
|
||||
return Err(sp_blockchain::Error::GenesisInvalid.into());
|
||||
}
|
||||
|
||||
if storage.children_default.keys()
|
||||
.any(|child_key| !well_known_keys::is_child_storage_key(&child_key)) {
|
||||
return Err(sp_blockchain::Error::GenesisInvalid.into());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -0,0 +1,379 @@
|
||||
// Copyright 2018-2020 Parity Technologies (UK) Ltd.
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Substrate is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
|
||||
// Substrate is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
//! Helper for managing the set of available leaves in the chain for DB implementations.
|
||||
|
||||
use std::collections::BTreeMap;
|
||||
use std::cmp::Reverse;
|
||||
use sp_database::{Database, Transaction};
|
||||
use sp_runtime::traits::AtLeast32Bit;
|
||||
use codec::{Encode, Decode};
|
||||
use sp_blockchain::{Error, Result};
|
||||
|
||||
type DbHash = [u8; 32];
|
||||
|
||||
// A single (hash, number) pair in the leaf set. The number is wrapped in
// `Reverse` so that ordering by key yields descending block numbers.
#[derive(Debug, Clone, PartialEq, Eq)]
struct LeafSetItem<H, N> {
	hash: H,
	number: Reverse<N>,
}
|
||||
|
||||
/// A displaced leaf after import.
///
/// Produced by `LeafSet::import` when the imported block's parent stopped
/// being a leaf; hold on to it so the import can be undone via `Undo::undo_import`.
#[must_use = "Displaced items from the leaf set must be handled."]
pub struct ImportDisplaced<H, N> {
	// Hash of the newly imported block that displaced its parent.
	new_hash: H,
	// The parent entry that was removed from the leaf set.
	displaced: LeafSetItem<H, N>,
}
|
||||
|
||||
/// Displaced leaves after finalization.
///
/// Holds every leaf removed by `LeafSet::finalize_height`, grouped by
/// (reversed) block number, so the finalization can be undone via
/// `Undo::undo_finalization`.
#[must_use = "Displaced items from the leaf set must be handled."]
pub struct FinalizationDisplaced<H, N> {
	leaves: BTreeMap<Reverse<N>, Vec<H>>,
}
|
||||
|
||||
impl<H, N: Ord> FinalizationDisplaced<H, N> {
	/// Merge with another. This should only be used for displaced items that
	/// are produced within one transaction of each other.
	pub fn merge(&mut self, mut other: Self) {
		// this will ignore keys that are in duplicate, however
		// if these are actually produced correctly via the leaf-set within
		// one transaction, then there will be no overlap in the keys.
		self.leaves.append(&mut other.leaves);
	}
}
|
||||
|
||||
/// List of leaf hashes ordered by number (descending).
/// Stored in memory for fast access.
/// This allows very fast checking and modification of active leaves.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct LeafSet<H, N> {
	// Leaves grouped by block number; `Reverse` keys make BTreeMap iteration descending.
	storage: BTreeMap<Reverse<N>, Vec<H>>,
	// (hash, number) pairs added since the last `prepare_transaction`.
	pending_added: Vec<(H, N)>,
	// Hashes removed since the last `prepare_transaction`.
	pending_removed: Vec<H>,
}
|
||||
|
||||
impl<H, N> LeafSet<H, N> where
	H: Clone + PartialEq + Decode + Encode,
	N: std::fmt::Debug + Clone + AtLeast32Bit + Decode + Encode,
{
	/// Construct a new, blank leaf set.
	pub fn new() -> Self {
		Self {
			storage: BTreeMap::new(),
			pending_added: Vec::new(),
			pending_removed: Vec::new(),
		}
	}

	/// Read the leaf list from the DB, using given prefix for keys.
	///
	/// A missing entry yields an empty set; a present-but-undecodable entry
	/// is a `Backend` error.
	pub fn read_from_db(db: &dyn Database<DbHash>, column: u32, prefix: &[u8]) -> Result<Self> {
		let mut storage = BTreeMap::new();

		match db.get(column, prefix) {
			Some(leaves) => {
				// On-disk format is an encoded `Vec<(N, Vec<H>)>` — the exact
				// snapshot written by `prepare_transaction`.
				let vals: Vec<_> = match Decode::decode(&mut leaves.as_ref()) {
					Ok(vals) => vals,
					Err(_) => return Err(Error::Backend("Error decoding leaves".into())),
				};
				for (number, hashes) in vals.into_iter() {
					storage.insert(Reverse(number), hashes);
				}
			}
			None => {},
		}
		Ok(Self {
			storage,
			pending_added: Vec::new(),
			pending_removed: Vec::new(),
		})
	}

	/// Update the leaf list on import. Returns a displaced leaf if there was one.
	///
	/// The new block always becomes a leaf; its parent (at `number - 1`) stops
	/// being one if it currently is. A parent hash not present in the set is
	/// silently tolerated — no displacement is reported in that case.
	pub fn import(&mut self, hash: H, number: N, parent_hash: H) -> Option<ImportDisplaced<H, N>> {
		// avoid underflow for genesis.
		let displaced = if number != N::zero() {
			let new_number = Reverse(number.clone() - N::one());
			let was_displaced = self.remove_leaf(&new_number, &parent_hash);

			if was_displaced {
				self.pending_removed.push(parent_hash.clone());
				Some(ImportDisplaced {
					new_hash: hash.clone(),
					displaced: LeafSetItem {
						hash: parent_hash,
						number: new_number,
					},
				})
			} else {
				None
			}
		} else {
			None
		};

		self.insert_leaf(Reverse(number.clone()), hash.clone());
		self.pending_added.push((hash, number));
		displaced
	}

	/// Note a block height finalized, displacing all leaves with number less than the finalized block's.
	///
	/// Although it would be more technically correct to also prune out leaves at the
	/// same number as the finalized block, but with different hashes, the current behavior
	/// is simpler and our assumptions about how finalization works means that those leaves
	/// will be pruned soon afterwards anyway.
	pub fn finalize_height(&mut self, number: N) -> FinalizationDisplaced<H, N> {
		let boundary = if number == N::zero() {
			// Nothing can be below height zero; return an empty displacement.
			return FinalizationDisplaced { leaves: BTreeMap::new() };
		} else {
			number - N::one()
		};

		// Keys are `Reverse`, so `split_off(&Reverse(boundary))` peels off
		// everything at height `boundary` and below.
		let below_boundary = self.storage.split_off(&Reverse(boundary));
		self.pending_removed.extend(below_boundary.values().flat_map(|h| h.iter()).cloned());
		FinalizationDisplaced {
			leaves: below_boundary,
		}
	}

	/// Undo all pending operations.
	///
	/// This returns an `Undo` struct, where any
	/// `Displaced` objects that have returned by previous method calls
	/// should be passed to via the appropriate methods. Otherwise,
	/// the on-disk state may get out of sync with in-memory state.
	pub fn undo(&mut self) -> Undo<H, N> {
		Undo { inner: self }
	}

	/// Revert to the given block height by dropping all leaves in the leaf set
	/// with a block number higher than the target.
	pub fn revert(&mut self, best_hash: H, best_number: N) {
		// Snapshot first: `remove_leaf` needs `&mut self`, so we cannot remove
		// while iterating over `storage`.
		let items = self.storage.iter()
			.flat_map(|(number, hashes)| hashes.iter().map(move |h| (h.clone(), number.clone())))
			.collect::<Vec<_>>();

		for (hash, number) in &items {
			if number.0 > best_number {
				assert!(
					self.remove_leaf(number, hash),
					"item comes from an iterator over storage; qed",
				);

				self.pending_removed.push(hash.clone());
			}
		}

		let best_number = Reverse(best_number);
		let leaves_contains_best = self.storage
			.get(&best_number)
			.map_or(false, |hashes| hashes.contains(&best_hash));

		// we need to make sure that the best block exists in the leaf set as
		// this is an invariant of regular block import.
		if !leaves_contains_best {
			self.insert_leaf(best_number.clone(), best_hash.clone());
			self.pending_added.push((best_hash, best_number.0));
		}
	}

	/// Returns an iterator over all hashes in the leaf set
	/// ordered by their block number descending.
	pub fn hashes(&self) -> Vec<H> {
		self.storage.iter().flat_map(|(_, hashes)| hashes.iter()).cloned().collect()
	}

	/// Number of known leaves
	// NOTE(review): this is `storage.len()`, i.e. the number of distinct block
	// heights that currently have leaves — not the total number of leaf hashes
	// (several leaves can share a height). Confirm callers expect this.
	pub fn count(&self) -> usize {
		self.storage.len()
	}

	/// Write the leaf list to the database transaction.
	///
	/// Writes the full current snapshot (not a delta) and clears both pending
	/// buffers, so a subsequent `Undo` drop has nothing left to discard.
	pub fn prepare_transaction(&mut self, tx: &mut Transaction<DbHash>, column: u32, prefix: &[u8]) {
		let leaves: Vec<_> = self.storage.iter().map(|(n, h)| (n.0.clone(), h.clone())).collect();
		tx.set_from_vec(column, prefix, leaves.encode());
		self.pending_added.clear();
		self.pending_removed.clear();
	}

	// Test helper: whether `hash` is currently a leaf at height `number`.
	#[cfg(test)]
	fn contains(&self, number: N, hash: H) -> bool {
		self.storage.get(&Reverse(number)).map_or(false, |hashes| hashes.contains(&hash))
	}

	// Append `hash` to the bucket for `number`, creating the bucket on demand.
	fn insert_leaf(&mut self, number: Reverse<N>, hash: H) {
		self.storage.entry(number).or_insert_with(Vec::new).push(hash);
	}

	// Returns true if this leaf was contained, false otherwise.
	// Drops the whole bucket when removing its last hash, keeping `storage`
	// free of empty entries.
	fn remove_leaf(&mut self, number: &Reverse<N>, hash: &H) -> bool {
		let mut empty = false;
		let removed = self.storage.get_mut(number).map_or(false, |leaves| {
			let mut found = false;
			leaves.retain(|h| if h == hash {
				found = true;
				false
			} else {
				true
			});

			if leaves.is_empty() { empty = true }

			found
		});

		if removed && empty {
			self.storage.remove(number);
		}

		removed
	}
}
|
||||
|
||||
/// Helper for undoing operations.
///
/// Borrows the `LeafSet` mutably; dropping it clears the set's pending
/// add/remove buffers (see the `Drop` impl).
pub struct Undo<'a, H: 'a, N: 'a> {
	inner: &'a mut LeafSet<H, N>,
}
|
||||
|
||||
impl<'a, H: 'a, N: 'a> Undo<'a, H, N> where
	H: Clone + PartialEq + Decode + Encode,
	N: std::fmt::Debug + Clone + AtLeast32Bit + Decode + Encode,
{
	/// Undo an imported block by providing the displaced leaf.
	pub fn undo_import(&mut self, displaced: ImportDisplaced<H, N>) {
		// The child sits one height above the parent it displaced:
		// remove the child leaf, then restore the parent leaf.
		let new_number = Reverse(displaced.displaced.number.0.clone() + N::one());
		self.inner.remove_leaf(&new_number, &displaced.new_hash);
		self.inner.insert_leaf(new_number, displaced.displaced.hash);
	}

	/// Undo a finalization operation by providing the displaced leaves.
	pub fn undo_finalization(&mut self, mut displaced: FinalizationDisplaced<H, N>) {
		// Re-attach every bucket that `finalize_height` split off.
		self.inner.storage.append(&mut displaced.leaves);
	}
}
|
||||
|
||||
impl<'a, H: 'a, N: 'a> Drop for Undo<'a, H, N> {
	fn drop(&mut self) {
		// Discard any un-persisted pending changes so the in-memory set does
		// not later claim additions/removals that were undone.
		self.inner.pending_added.clear();
		self.inner.pending_removed.clear();
	}
}
|
||||
|
||||
#[cfg(test)]
mod tests {
	use super::*;
	use std::sync::Arc;

	// NOTE: literals like `1_1`, `2_2`, `10_1` are ordinary integers with a
	// digit separator (`1_1` == 11); the convention encodes "height_fork".

	#[test]
	fn it_works() {
		let mut set = LeafSet::new();
		set.import(0u32, 0u32, 0u32);

		// Build a single chain 0 -> 11 -> 21 -> 31; each import displaces its parent.
		set.import(1_1, 1, 0);
		set.import(2_1, 2, 1_1);
		set.import(3_1, 3, 2_1);

		// Only the tip remains a leaf.
		assert!(set.contains(3, 3_1));
		assert!(!set.contains(2, 2_1));
		assert!(!set.contains(1, 1_1));
		assert!(!set.contains(0, 0));

		// A sibling fork off 11 at height 2 becomes a second leaf; 11 was
		// already displaced, so nothing else is removed.
		set.import(2_2, 2, 1_1);

		assert!(set.contains(3, 3_1));
		assert!(set.contains(2, 2_2));
	}

	#[test]
	fn flush_to_disk() {
		const PREFIX: &[u8] = b"abcdefg";
		let db = Arc::new(sp_database::MemDb::default());

		let mut set = LeafSet::new();
		set.import(0u32, 0u32, 0u32);

		set.import(1_1, 1, 0);
		set.import(2_1, 2, 1_1);
		set.import(3_1, 3, 2_1);

		let mut tx = Transaction::new();

		// Round-trip: persist, then read back and compare for full equality.
		set.prepare_transaction(&mut tx, 0, PREFIX);
		db.commit(tx);

		let set2 = LeafSet::read_from_db(&*db, 0, PREFIX).unwrap();
		assert_eq!(set, set2);
	}

	#[test]
	fn two_leaves_same_height_can_be_included() {
		let mut set = LeafSet::new();

		// Two forks at the same height share a single storage bucket.
		set.import(1_1u32, 10u32,0u32);
		set.import(1_2, 10, 0);

		assert!(set.storage.contains_key(&Reverse(10)));
		assert!(set.contains(10, 1_1));
		assert!(set.contains(10, 1_2));
		assert!(!set.contains(10, 1_3));
	}

	#[test]
	fn finalization_consistent_with_disk() {
		const PREFIX: &[u8] = b"prefix";
		let db = Arc::new(sp_database::MemDb::default());

		// Parents here (10_2, 11_123) are not in the set; `import` tolerates
		// unknown parents and simply reports no displacement.
		let mut set = LeafSet::new();
		set.import(10_1u32, 10u32, 0u32);
		set.import(11_1, 11, 10_2);
		set.import(11_2, 11, 10_2);
		set.import(12_1, 12, 11_123);

		assert!(set.contains(10, 10_1));

		let mut tx = Transaction::new();
		set.prepare_transaction(&mut tx, 0, PREFIX);
		db.commit(tx);

		// Finalizing height 11 prunes all leaves strictly below it.
		let _ = set.finalize_height(11);
		let mut tx = Transaction::new();
		set.prepare_transaction(&mut tx, 0, PREFIX);
		db.commit(tx);

		assert!(set.contains(11, 11_1));
		assert!(set.contains(11, 11_2));
		assert!(set.contains(12, 12_1));
		assert!(!set.contains(10, 10_1));

		// The persisted snapshot matches the post-finalization in-memory set.
		let set2 = LeafSet::read_from_db(&*db, 0, PREFIX).unwrap();
		assert_eq!(set, set2);
	}

	#[test]
	fn undo_finalization() {
		let mut set = LeafSet::new();
		set.import(10_1u32, 10u32, 0u32);
		set.import(11_1, 11, 10_2);
		set.import(11_2, 11, 10_2);
		set.import(12_1, 12, 11_123);

		// Finalization removes the height-10 leaf...
		let displaced = set.finalize_height(11);
		assert!(!set.contains(10, 10_1));

		// ...and undoing it with the displaced set restores it.
		set.undo().undo_finalization(displaced);
		assert!(set.contains(10, 10_1));
	}
}
|
||||
@@ -20,8 +20,11 @@
|
||||
pub mod backend;
|
||||
pub mod call_executor;
|
||||
pub mod client;
|
||||
pub mod cht;
|
||||
pub mod execution_extensions;
|
||||
pub mod in_mem;
|
||||
pub mod light;
|
||||
pub mod leaves;
|
||||
pub mod notifications;
|
||||
pub mod proof_provider;
|
||||
|
||||
@@ -36,6 +39,13 @@ pub use proof_provider::*;
|
||||
|
||||
pub use sp_state_machine::{StorageProof, ExecutionStrategy, CloneableSpawn};
|
||||
|
||||
/// Usage Information Provider interface.
///
/// Implemented by clients that can report a [`ClientInfo`] snapshot about
/// themselves.
pub trait UsageProvider<Block: sp_runtime::traits::Block> {
	/// Get usage info about current client.
	fn usage_info(&self) -> ClientInfo<Block>;
}
|
||||
|
||||
/// Utility methods for the client.
|
||||
pub mod utils {
|
||||
use sp_blockchain::{HeaderBackend, HeaderMetadata, Error};
|
||||
|
||||
@@ -296,7 +296,25 @@ pub trait RemoteBlockchain<Block: BlockT>: Send + Sync {
|
||||
>>;
|
||||
}
|
||||
|
||||
/// Returns future that resolves header either locally, or remotely.
|
||||
pub fn future_header<Block: BlockT, F: Fetcher<Block>>(
|
||||
blockchain: &dyn RemoteBlockchain<Block>,
|
||||
fetcher: &F,
|
||||
id: BlockId<Block>,
|
||||
) -> impl Future<Output = Result<Option<Block::Header>, ClientError>> {
|
||||
use futures::future::{ready, Either, FutureExt};
|
||||
|
||||
match blockchain.header(id) {
|
||||
Ok(LocalOrRemote::Remote(request)) => Either::Left(
|
||||
fetcher
|
||||
.remote_header(request)
|
||||
.then(|header| ready(header.map(Some)))
|
||||
),
|
||||
Ok(LocalOrRemote::Unknown) => Either::Right(ready(Ok(None))),
|
||||
Ok(LocalOrRemote::Local(local_header)) => Either::Right(ready(Ok(Some(local_header)))),
|
||||
Err(err) => Either::Right(ready(Err(err))),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
|
||||
Reference in New Issue
Block a user