removes use of sc_client::Client from sc-rpc (#5063)

* removes use of sc_client::Client from sc-rpc

* remove Client impl from sc-finality-benches

* remove client impl from sc-finality-grandpa

* read_proof accepts iterator

* remove generic Executor param from ExecutorProvider

* fix long ass line

* code style changes

* merge with master

Co-authored-by: Arkadiy Paronyan <arkady.paronyan@gmail.com>
This commit is contained in:
Benjamin Kampmann
2020-03-05 16:41:10 +01:00
committed by GitHub
parent dc85ccb7df
commit 99ae5342eb
34 changed files with 739 additions and 544 deletions
+2
View File
@@ -3569,6 +3569,7 @@ dependencies = [
"sc-basic-authorship",
"sc-cli",
"sc-client",
"sc-client-api",
"sc-consensus-aura",
"sc-executor",
"sc-finality-grandpa",
@@ -5777,6 +5778,7 @@ dependencies = [
"sp-runtime",
"sp-state-machine",
"sp-std",
"sp-storage",
"sp-test-primitives",
"sp-transaction-pool",
"sp-trie",
@@ -30,6 +30,7 @@ sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensu
grandpa = { version = "0.8.0-alpha.2", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" }
grandpa-primitives = { version = "2.0.0-alpha.2", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" }
sc-client = { version = "0.8.0-alpha.2", path = "../../../client/" }
sc-client-api = { version = "2.0.0-alpha.2", path = "../../../client/api" }
sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" }
sc-basic-authorship = { path = "../../../client/basic-authorship" , version = "0.8.0-alpha.2"}
@@ -3,13 +3,14 @@
use std::sync::Arc;
use std::time::Duration;
use sc_client::LongestChain;
use sc_client_api::ExecutorProvider;
use node_template_runtime::{self, GenesisConfig, opaque::Block, RuntimeApi};
use sc_service::{error::{Error as ServiceError}, AbstractService, Configuration, ServiceBuilder};
use sp_inherents::InherentDataProviders;
use sc_executor::native_executor_instance;
pub use sc_executor::NativeExecutor;
use sp_consensus_aura::sr25519::{AuthorityPair as AuraPair};
use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider};
use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider, StorageAndProofProvider};
// Our native executor instance.
native_executor_instance!(
@@ -24,6 +25,7 @@ native_executor_instance!(
/// be able to perform chain operations.
macro_rules! new_full_start {
($config:expr) => {{
use std::sync::Arc;
let mut import_setup = None;
let inherent_data_providers = sp_inherents::InherentDataProviders::new();
@@ -42,7 +44,7 @@ macro_rules! new_full_start {
.ok_or_else(|| sc_service::Error::SelectChainRequired)?;
let (grandpa_block_import, grandpa_link) =
grandpa::block_import(client.clone(), &*client, select_chain)?;
grandpa::block_import(client.clone(), &(client.clone() as Arc<_>), select_chain)?;
let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new(
grandpa_block_import.clone(), client.clone(),
@@ -87,9 +89,11 @@ pub fn new_full(config: Configuration<GenesisConfig>)
.expect("Link Half and Block Import are present for Full Services or setup failed before. qed");
let service = builder
.with_finality_proof_provider(|client, backend|
Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, client)) as _)
)?
.with_finality_proof_provider(|client, backend| {
// GenesisAuthoritySetProvider is implemented for StorageAndProofProvider
let provider = client as Arc<dyn StorageAndProofProvider<_, _>>;
Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, provider)) as _)
})?
.build()?;
if participates_in_consensus {
@@ -201,7 +205,10 @@ pub fn new_light(config: Configuration<GenesisConfig>)
.map(|fetcher| fetcher.checker().clone())
.ok_or_else(|| "Trying to start light import queue without active fetch checker")?;
let grandpa_block_import = grandpa::light_block_import(
client.clone(), backend, &*client.clone(), Arc::new(fetch_checker),
client.clone(),
backend,
&(client.clone() as Arc<_>),
Arc::new(fetch_checker),
)?;
let finality_proof_import = grandpa_block_import.clone();
let finality_proof_request_builder =
@@ -218,8 +225,10 @@ pub fn new_light(config: Configuration<GenesisConfig>)
Ok((import_queue, finality_proof_request_builder))
})?
.with_finality_proof_provider(|client, backend|
Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, client)) as _)
)?
.with_finality_proof_provider(|client, backend| {
// GenesisAuthoritySetProvider is implemented for StorageAndProofProvider
let provider = client as Arc<dyn StorageAndProofProvider<_, _>>;
Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, provider)) as _)
})?
.build()
}
+16 -11
View File
@@ -22,7 +22,7 @@ use std::sync::Arc;
use sc_consensus_babe;
use sc_client::{self, LongestChain};
use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider};
use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider, StorageAndProofProvider};
use node_executor;
use node_primitives::Block;
use node_runtime::{GenesisConfig, RuntimeApi};
@@ -45,6 +45,7 @@ use sc_offchain::OffchainWorkers;
/// be able to perform chain operations.
macro_rules! new_full_start {
($config:expr) => {{
use std::sync::Arc;
type RpcExtension = jsonrpc_core::IoHandler<sc_rpc::Metadata>;
let mut import_setup = None;
let inherent_data_providers = sp_inherents::InherentDataProviders::new();
@@ -64,7 +65,7 @@ macro_rules! new_full_start {
.ok_or_else(|| sc_service::Error::SelectChainRequired)?;
let (grandpa_block_import, grandpa_link) = grandpa::block_import(
client.clone(),
&*client,
&(client.clone() as Arc<_>),
select_chain,
)?;
let justification_import = grandpa_block_import.clone();
@@ -116,6 +117,7 @@ macro_rules! new_full {
($config:expr, $with_startup_data: expr) => {{
use futures::prelude::*;
use sc_network::Event;
use sc_client_api::ExecutorProvider;
let (
is_authority,
@@ -139,9 +141,11 @@ macro_rules! new_full {
let (builder, mut import_setup, inherent_data_providers) = new_full_start!($config);
let service = builder
.with_finality_proof_provider(|client, backend|
Ok(Arc::new(grandpa::FinalityProofProvider::new(backend, client)) as _)
)?
.with_finality_proof_provider(|client, backend| {
// GenesisAuthoritySetProvider is implemented for StorageAndProofProvider
let provider = client as Arc<dyn grandpa::StorageAndProofProvider<_, _>>;
Ok(Arc::new(grandpa::FinalityProofProvider::new(backend, provider)) as _)
})?
.build()?;
let (block_import, grandpa_link, babe_link) = import_setup.take()
@@ -255,8 +259,7 @@ type ConcreteBlock = node_primitives::Block;
type ConcreteClient =
Client<
Backend<ConcreteBlock>,
LocalCallExecutor<Backend<ConcreteBlock>,
NativeExecutor<node_executor::Executor>>,
LocalCallExecutor<Backend<ConcreteBlock>, NativeExecutor<node_executor::Executor>>,
ConcreteBlock,
node_runtime::RuntimeApi
>;
@@ -317,7 +320,7 @@ pub fn new_light(config: NodeConfiguration)
let grandpa_block_import = grandpa::light_block_import(
client.clone(),
backend,
&*client,
&(client.clone() as Arc<_>),
Arc::new(fetch_checker),
)?;
@@ -342,9 +345,11 @@ pub fn new_light(config: NodeConfiguration)
Ok((import_queue, finality_proof_request_builder))
})?
.with_finality_proof_provider(|client, backend|
Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, client)) as _)
)?
.with_finality_proof_provider(|client, backend| {
// GenesisAuthoritySetProvider is implemented for StorageAndProofProvider
let provider = client as Arc<dyn StorageAndProofProvider<_, _>>;
Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, provider)) as _)
})?
.with_rpc_extensions(|builder,| ->
Result<RpcExtension, _>
{
+1
View File
@@ -34,6 +34,7 @@ sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../.
sp-state-machine = { version = "0.8.0-alpha.2", path = "../../primitives/state-machine" }
sc-telemetry = { version = "2.0.0-alpha.2", path = "../telemetry" }
sp-trie = { version = "2.0.0-alpha.2", path = "../../primitives/trie" }
sp-storage = { version = "2.0.0-alpha.2", path = "../../primitives/storage" }
sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../primitives/transaction-pool" }
[dev-dependencies]
+119
View File
@@ -26,6 +26,7 @@ use sp_state_machine::{
ChangesTrieState, ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieTransaction,
StorageCollection, ChildStorageCollection,
};
use sp_storage::{StorageData, StorageKey, ChildInfo};
use crate::{
blockchain::{
Backend as BlockchainBackend, well_known_cache_keys
@@ -38,6 +39,7 @@ use sp_consensus::BlockOrigin;
use parking_lot::RwLock;
pub use sp_state_machine::Backend as StateBackend;
use std::marker::PhantomData;
/// Extracts the state backend type for the given backend.
pub type StateBackendFor<B, Block> = <B as Backend<Block>>::State;
@@ -237,6 +239,123 @@ pub trait AuxStore {
fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result<Option<Vec<u8>>>;
}
/// An `Iterator` that iterates keys in a given block under a prefix.
pub struct KeyIterator<'a, State, Block> {
// Trie state backend of the block whose keys are being iterated.
state: State,
// Optional prefix; iteration ends at the first key that does not start with it.
prefix: Option<&'a StorageKey>,
// Last key yielded; `next` resumes from the key immediately after this one.
current_key: Vec<u8>,
// Ties the iterator to a `Block` type without storing a block value.
_phantom: PhantomData<Block>,
}
impl <'a, State, Block> KeyIterator<'a, State, Block> {
/// create a KeyIterator instance
pub fn new(state: State, prefix: Option<&'a StorageKey>, current_key: Vec<u8>) -> Self {
Self {
state,
prefix,
current_key,
_phantom: PhantomData,
}
}
}
impl<'a, State, Block> Iterator for KeyIterator<'a, State, Block>
where
	Block: BlockT,
	State: StateBackend<HashFor<Block>>,
{
	type Item = StorageKey;

	fn next(&mut self) -> Option<Self::Item> {
		// A backend error deliberately ends iteration: the iterator
		// terminates the first time the lookup fails.
		let key = match self.state.next_storage_key(&self.current_key) {
			Ok(Some(key)) => key,
			_ => return None,
		};
		// Stop as soon as we step outside the requested prefix range.
		match self.prefix {
			Some(prefix) if !key.starts_with(&prefix.0[..]) => None,
			_ => {
				self.current_key = key.clone();
				Some(StorageKey(key))
			},
		}
	}
}
/// Provides access to storage primitives
pub trait StorageProvider<Block: BlockT, B: Backend<Block>> {
/// Given a `BlockId` and a key, return the value under the key in that block.
fn storage(&self, id: &BlockId<Block>, key: &StorageKey) -> sp_blockchain::Result<Option<StorageData>>;
/// Given a `BlockId` and a key prefix, return the matching storage keys in that block.
fn storage_keys(&self, id: &BlockId<Block>, key_prefix: &StorageKey) -> sp_blockchain::Result<Vec<StorageKey>>;
/// Given a `BlockId` and a key, return the hash of the value under the key in that block.
fn storage_hash(&self, id: &BlockId<Block>, key: &StorageKey) -> sp_blockchain::Result<Option<Block::Hash>>;
/// Given a `BlockId` and a key prefix, return the matching storage keys and values in that block.
fn storage_pairs(
&self,
id: &BlockId<Block>,
key_prefix: &StorageKey
) -> sp_blockchain::Result<Vec<(StorageKey, StorageData)>>;
/// Given a `BlockId` and a key prefix, return a `KeyIterator` that iterates the matching storage
/// keys in that block, starting after `start_key` when one is given.
fn storage_keys_iter<'a>(
&self,
id: &BlockId<Block>,
prefix: Option<&'a StorageKey>,
start_key: Option<&StorageKey>
) -> sp_blockchain::Result<KeyIterator<'a, B::State, Block>>;
/// Given a `BlockId`, a key and a child storage key, return the value under the key in that block.
fn child_storage(
&self,
id: &BlockId<Block>,
storage_key: &StorageKey,
child_info: ChildInfo,
key: &StorageKey
) -> sp_blockchain::Result<Option<StorageData>>;
/// Given a `BlockId`, a key prefix, and a child storage key, return the matching child storage keys.
fn child_storage_keys(
&self,
id: &BlockId<Block>,
child_storage_key: &StorageKey,
child_info: ChildInfo,
key_prefix: &StorageKey
) -> sp_blockchain::Result<Vec<StorageKey>>;
/// Given a `BlockId`, a key and a child storage key, return the hash of the value under the key
/// in that block.
fn child_storage_hash(
&self,
id: &BlockId<Block>,
storage_key: &StorageKey,
child_info: ChildInfo,
key: &StorageKey
) -> sp_blockchain::Result<Option<Block::Hash>>;
/// Get longest range within [first; last] that is possible to use in `key_changes`
/// and `key_changes_proof` calls.
/// Range could be shortened from the beginning if some changes tries have been pruned.
/// Returns Ok(None) if changes tries are not supported.
fn max_key_changes_range(
&self,
first: NumberFor<Block>,
last: BlockId<Block>,
) -> sp_blockchain::Result<Option<(NumberFor<Block>, BlockId<Block>)>>;
/// Get pairs of (block, extrinsic) where key has been changed at given blocks range.
/// Works only for runtimes that are supporting changes tries.
///
/// Changes are returned in descending order (i.e. last block comes first).
fn key_changes(
&self,
first: NumberFor<Block>,
last: BlockId<Block>,
storage_key: Option<&StorageKey>,
key: &StorageKey
) -> sp_blockchain::Result<Vec<(NumberFor<Block>, u32)>>;
}
/// Client backend.
///
/// Manages the data layer.
+12
View File
@@ -29,6 +29,18 @@ use sp_externalities::Extensions;
use sp_core::NativeOrEncoded;
use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache};
use crate::execution_extensions::ExecutionExtensions;
/// Provides access to a client's call executor and execution extensions.
pub trait ExecutorProvider<Block: BlockT> {
/// The concrete call executor type used by this provider.
type Executor: CallExecutor<Block>;
/// Get call executor reference.
fn executor(&self) -> &Self::Executor;
/// Get a reference to the execution extensions.
fn execution_extensions(&self) -> &ExecutionExtensions<Block>;
}
/// Method call executor.
pub trait CallExecutor<B: BlockT> {
+6 -2
View File
@@ -21,7 +21,7 @@ use futures::channel::mpsc;
use sp_core::storage::StorageKey;
use sp_runtime::{
traits::{Block as BlockT, NumberFor},
generic::BlockId
generic::{BlockId, SignedBlock}
};
use sp_consensus::BlockOrigin;
@@ -76,9 +76,13 @@ pub trait BlockchainEvents<Block: BlockT> {
/// Fetch block body by ID.
pub trait BlockBody<Block: BlockT> {
/// Get block body by ID. Returns `None` if the body is not stored.
fn block_body(&self,
fn block_body(
&self,
id: &BlockId<Block>
) -> sp_blockchain::Result<Option<Vec<<Block as BlockT>::Extrinsic>>>;
/// Get full block by id.
fn block(&self, id: &BlockId<Block>) -> sp_blockchain::Result<Option<SignedBlock<Block>>>;
}
/// Provide a list of potential uncle headers for a given block.
+2
View File
@@ -23,6 +23,7 @@ pub mod client;
pub mod execution_extensions;
pub mod light;
pub mod notifications;
pub mod proof_provider;
pub use sp_blockchain as blockchain;
pub use backend::*;
@@ -31,6 +32,7 @@ pub use call_executor::*;
pub use client::*;
pub use light::*;
pub use notifications::*;
pub use proof_provider::*;
pub use sp_state_machine::{StorageProof, ExecutionStrategy};
@@ -0,0 +1,71 @@
// Copyright 2017-2020 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! Proof utilities
use sp_runtime::{
generic::BlockId,
traits::{Block as BlockT},
};
use crate::{StorageProof, ChangesProof};
use sp_storage::{ChildInfo, StorageKey};
/// Interface for providing block proving utilities.
pub trait ProofProvider<Block: BlockT> {
/// Reads storage values at a given block for the given keys, returning a read proof.
fn read_proof(
&self,
id: &BlockId<Block>,
keys: &mut dyn Iterator<Item=&[u8]>,
) -> sp_blockchain::Result<StorageProof>;
/// Reads child storage values at a given block + storage_key for the given keys, returning
/// a read proof.
fn read_child_proof(
&self,
id: &BlockId<Block>,
storage_key: &[u8],
child_info: ChildInfo,
keys: &mut dyn Iterator<Item=&[u8]>,
) -> sp_blockchain::Result<StorageProof>;
/// Execute a call to a contract on top of state in a block of given hash
/// AND returning execution proof.
///
/// No changes are made.
fn execution_proof(
&self,
id: &BlockId<Block>,
method: &str,
call_data: &[u8],
) -> sp_blockchain::Result<(Vec<u8>, StorageProof)>;
/// Reads the given header and generates a CHT-based header proof.
fn header_proof(&self, id: &BlockId<Block>) -> sp_blockchain::Result<(Block::Header, StorageProof)>;
/// Get proof for computation of (block, extrinsic) pairs where key has been changed at given blocks range.
/// `min` is the hash of the first block, which changes trie root is known to the requester - when we're using
/// changes tries from ascendants of this block, we should provide proofs for changes tries roots
/// `max` is the hash of the last block known to the requester - we can't use changes tries from descendants
/// of this block.
/// Works only for runtimes that are supporting changes tries.
fn key_changes_proof(
&self,
first: Block::Hash,
last: Block::Hash,
min: Block::Hash,
max: Block::Hash,
storage_key: Option<&StorageKey>,
key: &StorageKey,
) -> sp_blockchain::Result<ChangesProof<Block::Header>>;
}
@@ -28,10 +28,7 @@ use parking_lot::RwLock;
use sp_blockchain::{HeaderBackend, Error as ClientError, HeaderMetadata};
use std::marker::PhantomData;
use sc_client_api::{
backend::Backend,
utils::is_descendent_of,
};
use sc_client_api::{backend::Backend, utils::is_descendent_of};
use sc_client::apply_aux;
use finality_grandpa::{
BlockNumberOps, Equivocation, Error as GrandpaError, round::State as RoundState,
@@ -34,16 +34,15 @@
//! finality proof (that finalizes some block C that is ancestor of the B and descendant
//! of the U) could be returned.
use std::iter;
use std::sync::Arc;
use log::{trace, warn};
use sp_blockchain::{Backend as BlockchainBackend, Error as ClientError, Result as ClientResult};
use sc_client_api::{
backend::Backend, CallExecutor, StorageProof,
backend::Backend, StorageProof,
light::{FetchChecker, RemoteReadRequest},
StorageProvider, ProofProvider,
};
use sc_client::Client;
use parity_scale_codec::{Encode, Decode};
use finality_grandpa::BlockNumberOps;
use sp_runtime::{
@@ -67,12 +66,25 @@ pub trait AuthoritySetForFinalityProver<Block: BlockT>: Send + Sync {
fn prove_authorities(&self, block: &BlockId<Block>) -> ClientResult<StorageProof>;
}
/// Client-based implementation of AuthoritySetForFinalityProver.
impl<B, E, Block: BlockT, RA> AuthoritySetForFinalityProver<Block> for Client<B, E, Block, RA>
/// Trait that combines `StorageProvider` and `ProofProvider`
pub trait StorageAndProofProvider<Block, BE>: StorageProvider<Block, BE> + ProofProvider<Block> + Send + Sync
where
B: Backend<Block> + Send + Sync + 'static,
E: CallExecutor<Block> + 'static + Clone + Send + Sync,
RA: Send + Sync,
Block: BlockT,
BE: Backend<Block> + Send + Sync,
{}
/// Blanket implementation.
impl<Block, BE, P> StorageAndProofProvider<Block, BE> for P
where
Block: BlockT,
BE: Backend<Block> + Send + Sync,
P: StorageProvider<Block, BE> + ProofProvider<Block> + Send + Sync,
{}
/// Implementation of AuthoritySetForFinalityProver.
impl<BE, Block: BlockT> AuthoritySetForFinalityProver<Block> for Arc<dyn StorageAndProofProvider<Block, BE>>
where
BE: Backend<Block> + Send + Sync + 'static,
{
fn authorities(&self, block: &BlockId<Block>) -> ClientResult<AuthorityList> {
let storage_key = StorageKey(GRANDPA_AUTHORITIES_KEY.to_vec());
@@ -83,7 +95,7 @@ impl<B, E, Block: BlockT, RA> AuthoritySetForFinalityProver<Block> for Client<B,
}
fn prove_authorities(&self, block: &BlockId<Block>) -> ClientResult<StorageProof> {
self.read_proof(block, iter::once(GRANDPA_AUTHORITIES_KEY))
self.read_proof(block, &mut std::iter::once(GRANDPA_AUTHORITIES_KEY))
}
}
@@ -146,11 +158,13 @@ impl<B, Block: BlockT> FinalityProofProvider<B, Block>
///
/// - backend for accessing blockchain data;
/// - authority_provider for calling and proving runtime methods.
pub fn new(
pub fn new<P>(
backend: Arc<B>,
authority_provider: Arc<dyn AuthoritySetForFinalityProver<Block>>,
) -> Self {
FinalityProofProvider { backend, authority_provider }
authority_provider: P,
) -> Self
where P: AuthoritySetForFinalityProver<Block> + 'static,
{
FinalityProofProvider { backend, authority_provider: Arc::new(authority_provider) }
}
}
@@ -541,8 +541,7 @@ impl<Backend, Block: BlockT, Client, SC> GrandpaBlockImport<Backend, Block, Clie
}
}
impl<BE, Block: BlockT, Client, SC>
GrandpaBlockImport<BE, Block, Client, SC>
impl<BE, Block: BlockT, Client, SC> GrandpaBlockImport<BE, Block, Client, SC>
where
BE: Backend<Block>,
Client: crate::ClientForGrandpa<Block, BE>,
+7 -10
View File
@@ -57,11 +57,11 @@ use futures::StreamExt;
use log::{debug, info};
use futures::channel::mpsc;
use sc_client_api::{
backend::{AuxStore, Backend},
LockImportRun, BlockchainEvents, CallExecutor,
backend::{AuxStore, Backend}, ExecutionStrategy, Finalizer, TransactionFor,
ExecutionStrategy, Finalizer, TransactionFor, ExecutorProvider,
};
use sp_blockchain::{HeaderBackend, Error as ClientError, HeaderMetadata};
use sc_client::Client;
use parity_scale_codec::{Decode, Encode};
use sp_runtime::generic::BlockId;
use sp_runtime::traits::{NumberFor, Block as BlockT, DigestFor, Zero};
@@ -96,7 +96,7 @@ mod observer;
mod until_imported;
mod voting_rule;
pub use finality_proof::FinalityProofProvider;
pub use finality_proof::{FinalityProofProvider, StorageAndProofProvider};
pub use justification::GrandpaJustification;
pub use light_import::light_block_import;
pub use voting_rule::{
@@ -266,7 +266,7 @@ impl<Block: BlockT, Client> BlockStatus<Block> for Arc<Client> where
pub trait ClientForGrandpa<Block, BE>:
LockImportRun<Block, BE> + Finalizer<Block, BE> + AuxStore
+ HeaderMetadata<Block, Error = sp_blockchain::Error> + HeaderBackend<Block>
+ BlockchainEvents<Block> + ProvideRuntimeApi<Block>
+ BlockchainEvents<Block> + ProvideRuntimeApi<Block> + ExecutorProvider<Block>
+ BlockImport<Block, Transaction = TransactionFor<BE, Block>, Error = sp_consensus::Error>
where
BE: Backend<Block>,
@@ -279,7 +279,7 @@ impl<Block, BE, T> ClientForGrandpa<Block, BE> for T
Block: BlockT,
T: LockImportRun<Block, BE> + Finalizer<Block, BE> + AuxStore
+ HeaderMetadata<Block, Error = sp_blockchain::Error> + HeaderBackend<Block>
+ BlockchainEvents<Block> + ProvideRuntimeApi<Block>
+ BlockchainEvents<Block> + ProvideRuntimeApi<Block> + ExecutorProvider<Block>
+ BlockImport<Block, Transaction = TransactionFor<BE, Block>, Error = sp_consensus::Error>,
{}
@@ -387,11 +387,8 @@ pub trait GenesisAuthoritySetProvider<Block: BlockT> {
fn get(&self) -> Result<AuthorityList, ClientError>;
}
impl<B, E, Block: BlockT, RA> GenesisAuthoritySetProvider<Block> for Client<B, E, Block, RA>
where
B: Backend<Block> + Send + Sync + 'static,
E: CallExecutor<Block> + Send + Sync,
RA: Send + Sync,
impl<Block: BlockT, E> GenesisAuthoritySetProvider<Block> for Arc<dyn ExecutorProvider<Block, Executor = E>>
where E: CallExecutor<Block>,
{
fn get(&self) -> Result<AuthorityList, ClientError> {
// This implementation uses the Grandpa runtime API instead of reading directly from the
@@ -18,7 +18,9 @@ use std::collections::HashMap;
use std::sync::Arc;
use log::{info, trace, warn};
use parking_lot::RwLock;
use sc_client_api::backend::{AuxStore, Backend, Finalizer, TransactionFor};
use sc_client_api::{
backend::{AuxStore, Backend, Finalizer, TransactionFor},
};
use sp_blockchain::{HeaderBackend, Error as ClientError, well_known_cache_keys};
use parity_scale_codec::{Encode, Decode};
use sp_consensus::{
@@ -328,7 +328,7 @@ impl<B, BE, C, N> Future for ObserverWork<B, BE, C, N>
where
B: BlockT,
BE: Backend<B> + Unpin + 'static,
C: crate::ClientForGrandpa<B, BE>+ 'static,
C: crate::ClientForGrandpa<B, BE> + 'static,
N: NetworkT<B>,
NumberFor<B>: BlockNumberOps,
{
@@ -39,7 +39,7 @@ use sp_consensus::{
use std::{
collections::{HashMap, HashSet},
result,
pin::Pin, task,
pin::Pin,
};
use parity_scale_codec::Decode;
use sp_runtime::traits::{Block as BlockT, Header as HeaderT, HashFor};
@@ -170,8 +170,7 @@ impl TestNetFactory for GrandpaTestNet {
) -> Option<Arc<dyn sc_network::config::FinalityProofProvider<Block>>> {
match client {
PeersClient::Full(_, ref backend) => {
let authorities_provider = Arc::new(self.test_config.clone());
Some(Arc::new(FinalityProofProvider::new(backend.clone(), authorities_provider)))
Some(Arc::new(FinalityProofProvider::new(backend.clone(), self.test_config.clone())))
},
PeersClient::Light(_, _) => None,
}
+23 -14
View File
@@ -18,7 +18,7 @@
use sc_client::Client as SubstrateClient;
use sp_blockchain::{Error, Info as BlockchainInfo};
use sc_client_api::{ChangesProof, StorageProof, CallExecutor};
use sc_client_api::{ChangesProof, StorageProof, CallExecutor, ProofProvider};
use sp_consensus::{BlockImport, BlockStatus, Error as ConsensusError};
use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor};
use sp_runtime::generic::{BlockId};
@@ -50,7 +50,11 @@ pub trait Client<Block: BlockT>: Send + Sync {
-> Result<(Block::Header, StorageProof), Error>;
/// Get storage read execution proof.
fn read_proof(&self, block: &Block::Hash, keys: &[Vec<u8>]) -> Result<StorageProof, Error>;
fn read_proof(
&self,
block: &Block::Hash,
keys: &mut dyn Iterator<Item=&[u8]>,
) -> Result<StorageProof, Error>;
/// Get child storage read execution proof.
fn read_child_proof(
@@ -58,7 +62,7 @@ pub trait Client<Block: BlockT>: Send + Sync {
block: &Block::Hash,
storage_key: &[u8],
child_info: ChildInfo,
keys: &[Vec<u8>],
keys: &mut dyn Iterator<Item=&[u8]>,
) -> Result<StorageProof, Error>;
/// Get method execution proof.
@@ -125,14 +129,19 @@ impl<B, E, Block, RA> Client<Block> for SubstrateClient<B, E, Block, RA> where
(self as &SubstrateClient<B, E, Block, RA>).justification(id)
}
fn header_proof(&self, block_number: <Block::Header as HeaderT>::Number)
-> Result<(Block::Header, StorageProof), Error>
{
(self as &SubstrateClient<B, E, Block, RA>).header_proof(&BlockId::Number(block_number))
fn header_proof(
&self,
block_number: <Block::Header as HeaderT>::Number,
)-> Result<(Block::Header, StorageProof), Error> {
ProofProvider::<Block>::header_proof(self, &BlockId::Number(block_number))
}
fn read_proof(&self, block: &Block::Hash, keys: &[Vec<u8>]) -> Result<StorageProof, Error> {
(self as &SubstrateClient<B, E, Block, RA>).read_proof(&BlockId::Hash(block.clone()), keys)
fn read_proof(
&self,
block: &Block::Hash,
keys: &mut dyn Iterator<Item=&[u8]>,
) -> Result<StorageProof, Error> {
ProofProvider::<Block>::read_proof(self, &BlockId::Hash(block.clone()), keys)
}
fn read_child_proof(
@@ -140,10 +149,9 @@ impl<B, E, Block, RA> Client<Block> for SubstrateClient<B, E, Block, RA> where
block: &Block::Hash,
storage_key: &[u8],
child_info: ChildInfo,
keys: &[Vec<u8>],
keys: &mut dyn Iterator<Item=&[u8]>,
) -> Result<StorageProof, Error> {
(self as &SubstrateClient<B, E, Block, RA>)
.read_child_proof(&BlockId::Hash(block.clone()), storage_key, child_info, keys)
ProofProvider::<Block>::read_child_proof(self, &BlockId::Hash(block.clone()), storage_key, child_info, keys)
}
fn execution_proof(
@@ -152,7 +160,8 @@ impl<B, E, Block, RA> Client<Block> for SubstrateClient<B, E, Block, RA> where
method: &str,
data: &[u8],
) -> Result<(Vec<u8>, StorageProof), Error> {
(self as &SubstrateClient<B, E, Block, RA>).execution_proof(
ProofProvider::<Block>::execution_proof(
self,
&BlockId::Hash(block.clone()),
method,
data,
@@ -168,7 +177,7 @@ impl<B, E, Block, RA> Client<Block> for SubstrateClient<B, E, Block, RA> where
storage_key: Option<&StorageKey>,
key: &StorageKey,
) -> Result<ChangesProof<Block::Header>, Error> {
(self as &SubstrateClient<B, E, Block, RA>).key_changes_proof(first, last, min, max, storage_key, key)
ProofProvider::<Block>::key_changes_proof(self, first, last, min, max, storage_key, key)
}
fn is_descendent_of(&self, base: &Block::Hash, block: &Block::Hash) -> Result<bool, Error> {
+5 -2
View File
@@ -1473,7 +1473,10 @@ impl<B: BlockT, H: ExHashT> Protocol<B, H> {
trace!(target: "sync", "Remote read request {} from {} ({} at {})",
request.id, who, keys_str(), request.block);
let proof = match self.context_data.chain.read_proof(&request.block, &request.keys) {
let proof = match self.context_data.chain.read_proof(
&request.block,
&mut request.keys.iter().map(AsRef::as_ref)
) {
Ok(proof) => proof,
Err(error) => {
trace!(target: "sync", "Remote read request {} from {} ({} at {}) failed with: {}",
@@ -1523,7 +1526,7 @@ impl<B: BlockT, H: ExHashT> Protocol<B, H> {
&request.block,
&request.storage_key,
child_info,
&request.keys,
&mut request.keys.iter().map(AsRef::as_ref),
) {
Ok(proof) => proof,
Err(error) => {
@@ -467,7 +467,7 @@ where
let block = Decode::decode(&mut request.block.as_ref())?;
let proof = match self.chain.read_proof(&block, &request.keys) {
let proof = match self.chain.read_proof(&block, &mut request.keys.iter().map(AsRef::as_ref)) {
Ok(proof) => proof,
Err(error) => {
log::trace!("remote read request from {} ({} at {:?}) failed with: {}",
@@ -508,7 +508,12 @@ where
let proof =
if let Some(info) = ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) {
match self.chain.read_child_proof(&block, &request.storage_key, info, &request.keys) {
match self.chain.read_child_proof(
&block,
&request.storage_key,
info,
&mut request.keys.iter().map(AsRef::as_ref)
) {
Ok(proof) => proof,
Err(error) => {
log::trace!("remote read child request from {} ({} {} at {:?}) failed with: {}",
+1
View File
@@ -169,6 +169,7 @@ mod tests {
use substrate_test_runtime_client::runtime::Block;
use sc_transaction_pool::{BasicPool, FullChainApi};
use sp_transaction_pool::{TransactionPool, InPoolTransaction};
use sc_client_api::ExecutorProvider;
struct MockNetworkStateInfo();
+15 -23
View File
@@ -22,8 +22,7 @@ mod tests;
use std::{sync::Arc, convert::TryInto};
use log::warn;
use sc_client::Client;
use sp_blockchain::Error as ClientError;
use sp_blockchain::{Error as ClientError, HeaderBackend};
use rpc::futures::{
Sink, Future,
@@ -36,7 +35,7 @@ use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId};
use codec::{Encode, Decode};
use sp_core::{Bytes, traits::BareCryptoStorePtr};
use sp_api::ProvideRuntimeApi;
use sp_runtime::{generic, traits};
use sp_runtime::generic;
use sp_transaction_pool::{
TransactionPool, InPoolTransaction, TransactionStatus,
BlockHash, TxHash, TransactionFor, error::IntoPoolError,
@@ -48,9 +47,9 @@ pub use sc_rpc_api::author::*;
use self::error::{Error, FutureResult, Result};
/// Authoring API
pub struct Author<B, E, P, Block: traits::Block, RA> {
pub struct Author<P, Client> {
/// Substrate client
client: Arc<Client<B, E, Block, RA>>,
client: Arc<Client>,
/// Transactions pool
pool: Arc<P>,
/// Subscriptions manager
@@ -59,10 +58,10 @@ pub struct Author<B, E, P, Block: traits::Block, RA> {
keystore: BareCryptoStorePtr,
}
impl<B, E, P, Block: traits::Block, RA> Author<B, E, P, Block, RA> {
impl<P, Client> Author<P, Client> {
/// Create new instance of Authoring API.
pub fn new(
client: Arc<Client<B, E, Block, RA>>,
client: Arc<Client>,
pool: Arc<P>,
subscriptions: Subscriptions,
keystore: BareCryptoStorePtr,
@@ -76,18 +75,11 @@ impl<B, E, P, Block: traits::Block, RA> Author<B, E, P, Block, RA> {
}
}
impl<B, E, P, RA> AuthorApi<TxHash<P>, BlockHash<P>>
for Author<B, E, P, <P as TransactionPool>::Block, RA>
where
B: sc_client_api::backend::Backend<<P as TransactionPool>::Block> + Send + Sync + 'static,
E: sc_client::CallExecutor<<P as TransactionPool>::Block> + Send + Sync + 'static,
P: TransactionPool + Sync + Send + 'static,
P::Block: traits::Block,
P::Error: 'static,
RA: Send + Sync + 'static,
Client<B, E, P::Block, RA>: ProvideRuntimeApi<P::Block>,
<Client<B, E, P::Block, RA> as ProvideRuntimeApi<P::Block>>::Api:
SessionKeys<P::Block, Error = ClientError>,
impl<P, Client> AuthorApi<TxHash<P>, BlockHash<P>> for Author<P, Client>
where
P: TransactionPool + Sync + Send + 'static,
Client: HeaderBackend<P::Block> + ProvideRuntimeApi<P::Block> + Send + Sync + 'static,
Client::Api: SessionKeys<P::Block, Error = ClientError>,
{
type Metadata = crate::metadata::Metadata;
@@ -105,7 +97,7 @@ where
}
fn rotate_keys(&self) -> Result<Bytes> {
let best_block_hash = self.client.chain_info().best_hash;
let best_block_hash = self.client.info().best_hash;
self.client.runtime_api().generate_session_keys(
&generic::BlockId::Hash(best_block_hash),
None,
@@ -113,7 +105,7 @@ where
}
fn has_session_keys(&self, session_keys: Bytes) -> Result<bool> {
let best_block_hash = self.client.chain_info().best_hash;
let best_block_hash = self.client.info().best_hash;
let keys = self.client.runtime_api().decode_session_keys(
&generic::BlockId::Hash(best_block_hash),
session_keys.to_vec(),
@@ -133,7 +125,7 @@ where
Ok(xt) => xt,
Err(err) => return Box::new(result(Err(err.into()))),
};
let best_block_hash = self.client.chain_info().best_hash;
let best_block_hash = self.client.info().best_hash;
Box::new(self.pool
.submit_one(&generic::BlockId::hash(best_block_hash), xt)
.compat()
@@ -176,7 +168,7 @@ where
xt: Bytes,
) {
let submit = || -> Result<_> {
let best_block_hash = self.client.chain_info().best_hash;
let best_block_hash = self.client.info().best_hash;
let dxt = TransactionFor::<P>::decode(&mut &xt[..])
.map_err(error::Error::from)?;
Ok(
+3 -3
View File
@@ -25,8 +25,8 @@ use sp_core::{
};
use rpc::futures::Stream as _;
use substrate_test_runtime_client::{
self, AccountKeyring, runtime::{Extrinsic, Transfer, SessionKeys, RuntimeApi, Block},
DefaultTestClientBuilderExt, TestClientBuilderExt, Backend, Client, Executor,
self, AccountKeyring, runtime::{Extrinsic, Transfer, SessionKeys, Block},
DefaultTestClientBuilderExt, TestClientBuilderExt, Backend, Client,
};
use sc_transaction_pool::{BasicPool, FullChainApi};
use tokio::runtime;
@@ -75,7 +75,7 @@ impl Default for TestSetup {
}
impl TestSetup {
fn author(&self) -> Author<Backend, Executor, FullTransactionPool, Block, RuntimeApi> {
fn author(&self) -> Author<FullTransactionPool, Client<Backend>> {
Author {
client: self.client.clone(),
pool: self.pool.clone(),
+14 -12
View File
@@ -20,37 +20,39 @@ use std::sync::Arc;
use rpc::futures::future::result;
use sc_rpc_api::Subscriptions;
use sc_client_api::{CallExecutor, backend::Backend};
use sc_client::Client;
use sc_client_api::{BlockchainEvents, BlockBody};
use sp_runtime::{generic::{BlockId, SignedBlock}, traits::{Block as BlockT}};
use super::{ChainBackend, client_err, error::FutureResult};
use std::marker::PhantomData;
use sp_blockchain::HeaderBackend;
/// Blockchain API backend for full nodes. Reads all the data from local database.
pub struct FullChain<B, E, Block: BlockT, RA> {
pub struct FullChain<Block: BlockT, Client> {
/// Substrate client.
client: Arc<Client<B, E, Block, RA>>,
client: Arc<Client>,
/// Current subscriptions.
subscriptions: Subscriptions,
/// phantom member to pin the block type
_phantom: PhantomData<Block>,
}
impl<B, E, Block: BlockT, RA> FullChain<B, E, Block, RA> {
impl<Block: BlockT, Client> FullChain<Block, Client> {
/// Create new Chain API RPC handler.
pub fn new(client: Arc<Client<B, E, Block, RA>>, subscriptions: Subscriptions) -> Self {
pub fn new(client: Arc<Client>, subscriptions: Subscriptions) -> Self {
Self {
client,
subscriptions,
_phantom: PhantomData,
}
}
}
impl<B, E, Block, RA> ChainBackend<B, E, Block, RA> for FullChain<B, E, Block, RA> where
impl<Block, Client> ChainBackend<Client, Block> for FullChain<Block, Client> where
Block: BlockT + 'static,
B: Backend<Block> + Send + Sync + 'static,
E: CallExecutor<Block> + Send + Sync + 'static,
RA: Send + Sync + 'static,
Client: BlockBody<Block> + HeaderBackend<Block> + BlockchainEvents<Block> + 'static,
{
fn client(&self) -> &Arc<Client<B, E, Block, RA>> {
fn client(&self) -> &Arc<Client> {
&self.client
}
@@ -60,7 +62,7 @@ impl<B, E, Block, RA> ChainBackend<B, E, Block, RA> for FullChain<B, E, Block, R
fn header(&self, hash: Option<Block::Hash>) -> FutureResult<Option<Block::Header>> {
Box::new(result(self.client
.header(&BlockId::Hash(self.unwrap_or_best(hash)))
.header(BlockId::Hash(self.unwrap_or_best(hash)))
.map_err(client_err)
))
}
+10 -10
View File
@@ -22,7 +22,7 @@ use rpc::futures::future::{result, Future, Either};
use sc_rpc_api::Subscriptions;
use sc_client::{
Client, light::{fetcher::{Fetcher, RemoteBodyRequest}, blockchain::RemoteBlockchain},
light::{fetcher::{Fetcher, RemoteBodyRequest}, blockchain::RemoteBlockchain},
};
use sp_runtime::{
generic::{BlockId, SignedBlock},
@@ -30,12 +30,14 @@ use sp_runtime::{
};
use super::{ChainBackend, client_err, error::FutureResult};
use sp_blockchain::HeaderBackend;
use sc_client_api::BlockchainEvents;
/// Blockchain API backend for light nodes. Reads all the data from local
/// database, if available, or fetches it from remote node otherwise.
pub struct LightChain<B, E, Block: BlockT, RA, F> {
pub struct LightChain<Block: BlockT, Client, F> {
/// Substrate client.
client: Arc<Client<B, E, Block, RA>>,
client: Arc<Client>,
/// Current subscriptions.
subscriptions: Subscriptions,
/// Remote blockchain reference
@@ -44,10 +46,10 @@ pub struct LightChain<B, E, Block: BlockT, RA, F> {
fetcher: Arc<F>,
}
impl<B, E, Block: BlockT, RA, F: Fetcher<Block>> LightChain<B, E, Block, RA, F> {
impl<Block: BlockT, Client, F: Fetcher<Block>> LightChain<Block, Client, F> {
/// Create new Chain API RPC handler.
pub fn new(
client: Arc<Client<B, E, Block, RA>>,
client: Arc<Client>,
subscriptions: Subscriptions,
remote_blockchain: Arc<dyn RemoteBlockchain<Block>>,
fetcher: Arc<F>,
@@ -61,14 +63,12 @@ impl<B, E, Block: BlockT, RA, F: Fetcher<Block>> LightChain<B, E, Block, RA, F>
}
}
impl<B, E, Block, RA, F> ChainBackend<B, E, Block, RA> for LightChain<B, E, Block, RA, F> where
impl<Block, Client, F> ChainBackend<Client, Block> for LightChain<Block, Client, F> where
Block: BlockT + 'static,
B: sc_client_api::backend::Backend<Block> + Send + Sync + 'static,
E: sc_client::CallExecutor<Block> + Send + Sync + 'static,
RA: Send + Sync + 'static,
Client: BlockchainEvents<Block> + HeaderBackend<Block> + Send + Sync + 'static,
F: Fetcher<Block> + Send + Sync + 'static,
{
fn client(&self) -> &Arc<Client<B, E, Block, RA>> {
fn client(&self) -> &Arc<Client> {
&self.client
}
+32 -36
View File
@@ -32,7 +32,7 @@ use rpc::{
use sc_rpc_api::Subscriptions;
use sc_client::{
self, Client, BlockchainEvents,
self, BlockchainEvents,
light::{fetcher::Fetcher, blockchain::RemoteBlockchain},
};
use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId};
@@ -45,16 +45,17 @@ use sp_runtime::{
use self::error::{Result, Error, FutureResult};
pub use sc_rpc_api::chain::*;
use sp_blockchain::HeaderBackend;
use sc_client_api::BlockBody;
/// Blockchain backend API
trait ChainBackend<B, E, Block: BlockT, RA>: Send + Sync + 'static
trait ChainBackend<Client, Block: BlockT>: Send + Sync + 'static
where
Block: BlockT + 'static,
B: sc_client_api::backend::Backend<Block> + Send + Sync + 'static,
E: sc_client::CallExecutor<Block> + Send + Sync + 'static,
Client: HeaderBackend<Block> + BlockchainEvents<Block> + 'static,
{
/// Get client reference.
fn client(&self) -> &Arc<Client<B, E, Block, RA>>;
fn client(&self) -> &Arc<Client>;
/// Get subscriptions reference.
fn subscriptions(&self) -> &Subscriptions;
@@ -62,7 +63,7 @@ trait ChainBackend<B, E, Block: BlockT, RA>: Send + Sync + 'static
/// Tries to unwrap passed block hash, or uses best block hash otherwise.
fn unwrap_or_best(&self, hash: Option<Block::Hash>) -> Block::Hash {
match hash.into() {
None => self.client().chain_info().best_hash,
None => self.client().info().best_hash,
Some(hash) => hash,
}
}
@@ -81,9 +82,9 @@ trait ChainBackend<B, E, Block: BlockT, RA>: Send + Sync + 'static
number: Option<NumberOrHex<NumberFor<Block>>>,
) -> Result<Option<Block::Hash>> {
Ok(match number {
None => Some(self.client().chain_info().best_hash),
None => Some(self.client().info().best_hash),
Some(num_or_hex) => self.client()
.header(&BlockId::number(num_or_hex.to_number()?))
.header(BlockId::number(num_or_hex.to_number()?))
.map_err(client_err)?
.map(|h| h.hash()),
})
@@ -91,7 +92,7 @@ trait ChainBackend<B, E, Block: BlockT, RA>: Send + Sync + 'static
/// Get hash of the last finalized block in the canon chain.
fn finalized_head(&self) -> Result<Block::Hash> {
Ok(self.client().chain_info().finalized_hash)
Ok(self.client().info().finalized_hash)
}
/// All new head subscription
@@ -104,7 +105,7 @@ trait ChainBackend<B, E, Block: BlockT, RA>: Send + Sync + 'static
self.client(),
self.subscriptions(),
subscriber,
|| self.client().chain_info().best_hash,
|| self.client().info().best_hash,
|| self.client().import_notification_stream()
.map(|notification| Ok::<_, ()>(notification.header))
.compat(),
@@ -130,7 +131,7 @@ trait ChainBackend<B, E, Block: BlockT, RA>: Send + Sync + 'static
self.client(),
self.subscriptions(),
subscriber,
|| self.client().chain_info().best_hash,
|| self.client().info().best_hash,
|| self.client().import_notification_stream()
.filter(|notification| future::ready(notification.is_new_best))
.map(|notification| Ok::<_, ()>(notification.header))
@@ -157,7 +158,7 @@ trait ChainBackend<B, E, Block: BlockT, RA>: Send + Sync + 'static
self.client(),
self.subscriptions(),
subscriber,
|| self.client().chain_info().finalized_hash,
|| self.client().info().finalized_hash,
|| self.client().finality_notification_stream()
.map(|notification| Ok::<_, ()>(notification.header))
.compat(),
@@ -175,15 +176,13 @@ trait ChainBackend<B, E, Block: BlockT, RA>: Send + Sync + 'static
}
/// Create new state API that works on full node.
pub fn new_full<B, E, Block: BlockT, RA>(
client: Arc<Client<B, E, Block, RA>>,
pub fn new_full<Block: BlockT, Client>(
client: Arc<Client>,
subscriptions: Subscriptions,
) -> Chain<B, E, Block, RA>
) -> Chain<Block, Client>
where
Block: BlockT + 'static,
B: sc_client_api::backend::Backend<Block> + Send + Sync + 'static,
E: sc_client::CallExecutor<Block> + Send + Sync + 'static + Clone,
RA: Send + Sync + 'static,
Client: BlockBody<Block> + HeaderBackend<Block> + BlockchainEvents<Block> + 'static,
{
Chain {
backend: Box::new(self::chain_full::FullChain::new(client, subscriptions)),
@@ -191,17 +190,15 @@ pub fn new_full<B, E, Block: BlockT, RA>(
}
/// Create new state API that works on light node.
pub fn new_light<B, E, Block: BlockT, RA, F: Fetcher<Block>>(
client: Arc<Client<B, E, Block, RA>>,
pub fn new_light<Block: BlockT, Client, F: Fetcher<Block>>(
client: Arc<Client>,
subscriptions: Subscriptions,
remote_blockchain: Arc<dyn RemoteBlockchain<Block>>,
fetcher: Arc<F>,
) -> Chain<B, E, Block, RA>
) -> Chain<Block, Client>
where
Block: BlockT + 'static,
B: sc_client_api::backend::Backend<Block> + Send + Sync + 'static,
E: sc_client::CallExecutor<Block> + Send + Sync + 'static + Clone,
RA: Send + Sync + 'static,
Client: BlockBody<Block> + HeaderBackend<Block> + BlockchainEvents<Block> + 'static,
F: Send + Sync + 'static,
{
Chain {
@@ -215,15 +212,15 @@ pub fn new_light<B, E, Block: BlockT, RA, F: Fetcher<Block>>(
}
/// Chain API with subscriptions support.
pub struct Chain<B, E, Block: BlockT, RA> {
backend: Box<dyn ChainBackend<B, E, Block, RA>>,
pub struct Chain<Block: BlockT, Client> {
backend: Box<dyn ChainBackend<Client, Block>>,
}
impl<B, E, Block, RA> ChainApi<NumberFor<Block>, Block::Hash, Block::Header, SignedBlock<Block>> for Chain<B, E, Block, RA> where
Block: BlockT + 'static,
B: sc_client_api::backend::Backend<Block> + Send + Sync + 'static,
E: sc_client::CallExecutor<Block> + Send + Sync + 'static,
RA: Send + Sync + 'static
impl<Block, Client> ChainApi<NumberFor<Block>, Block::Hash, Block::Header, SignedBlock<Block>> for
Chain<Block, Client>
where
Block: BlockT + 'static,
Client: HeaderBackend<Block> + BlockchainEvents<Block> + 'static,
{
type Metadata = crate::metadata::Metadata;
@@ -281,16 +278,15 @@ impl<B, E, Block, RA> ChainApi<NumberFor<Block>, Block::Hash, Block::Header, Sig
}
/// Subscribe to new headers.
fn subscribe_headers<B, E, Block, RA, F, G, S, ERR>(
client: &Arc<Client<B, E, Block, RA>>,
fn subscribe_headers<Block, Client, F, G, S, ERR>(
client: &Arc<Client>,
subscriptions: &Subscriptions,
subscriber: Subscriber<Block::Header>,
best_block_hash: G,
stream: F,
) where
Block: BlockT + 'static,
B: sc_client_api::backend::Backend<Block> + Send + Sync + 'static,
E: sc_client::CallExecutor<Block> + Send + Sync + 'static,
Client: HeaderBackend<Block> + 'static,
F: FnOnce() -> S,
G: FnOnce() -> Block::Hash,
ERR: ::std::fmt::Debug,
@@ -298,7 +294,7 @@ fn subscribe_headers<B, E, Block, RA, F, G, S, ERR>(
{
subscriptions.add(subscriber, |sink| {
// send current head right at the start.
let header = client.header(&BlockId::Hash(best_block_hash()))
let header = client.header(BlockId::Hash(best_block_hash()))
.map_err(client_err)
.and_then(|header| {
header.ok_or_else(|| "Best header missing.".to_owned().into())
+27 -27
View File
@@ -27,26 +27,26 @@ use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId};
use rpc::{Result as RpcResult, futures::{Future, future::result}};
use sc_rpc_api::Subscriptions;
use sc_client::{Client, CallExecutor, light::{blockchain::RemoteBlockchain, fetcher::Fetcher}};
use sc_client::{light::{blockchain::RemoteBlockchain, fetcher::Fetcher}};
use sp_core::{Bytes, storage::{StorageKey, StorageData, StorageChangeSet}};
use sp_version::RuntimeVersion;
use sp_runtime::traits::Block as BlockT;
use sp_api::{Metadata, ProvideRuntimeApi};
use sp_api::{Metadata, ProvideRuntimeApi, CallApiAt};
use self::error::{Error, FutureResult};
pub use sc_rpc_api::state::*;
use sc_client_api::{ExecutorProvider, StorageProvider, BlockchainEvents, Backend};
use sp_blockchain::{HeaderMetadata, HeaderBackend};
const STORAGE_KEYS_PAGED_MAX_COUNT: u32 = 1000;
/// State backend API.
pub trait StateBackend<B, E, Block: BlockT, RA>: Send + Sync + 'static
pub trait StateBackend<Block: BlockT, Client>: Send + Sync + 'static
where
Block: BlockT + 'static,
B: sc_client_api::backend::Backend<Block> + Send + Sync + 'static,
E: sc_client::CallExecutor<Block> + Send + Sync + 'static,
RA: Send + Sync + 'static,
Client: Send + Sync + 'static,
{
/// Call runtime method at given block.
fn call(
@@ -194,18 +194,18 @@ pub trait StateBackend<B, E, Block: BlockT, RA>: Send + Sync + 'static
}
/// Create new state API that works on full node.
pub fn new_full<B, E, Block: BlockT, RA>(
client: Arc<Client<B, E, Block, RA>>,
pub fn new_full<BE, Block: BlockT, Client>(
client: Arc<Client>,
subscriptions: Subscriptions,
) -> State<B, E, Block, RA>
) -> State<Block, Client>
where
Block: BlockT + 'static,
B: sc_client_api::backend::Backend<Block> + Send + Sync + 'static,
E: CallExecutor<Block> + Send + Sync + 'static + Clone,
RA: Send + Sync + 'static,
Client<B, E, Block, RA>: ProvideRuntimeApi<Block>,
<Client<B, E, Block, RA> as ProvideRuntimeApi<Block>>::Api:
Metadata<Block, Error = sp_blockchain::Error>,
BE: Backend<Block> + 'static,
Client: ExecutorProvider<Block> + StorageProvider<Block, BE> + HeaderBackend<Block>
+ HeaderMetadata<Block, Error = sp_blockchain::Error> + BlockchainEvents<Block>
+ CallApiAt<Block, Error = sp_blockchain::Error>
+ ProvideRuntimeApi<Block> + Send + Sync + 'static,
Client::Api: Metadata<Block, Error = sp_blockchain::Error>,
{
State {
backend: Box::new(self::state_full::FullState::new(client, subscriptions)),
@@ -213,17 +213,19 @@ pub fn new_full<B, E, Block: BlockT, RA>(
}
/// Create new state API that works on light node.
pub fn new_light<B, E, Block: BlockT, RA, F: Fetcher<Block>>(
client: Arc<Client<B, E, Block, RA>>,
pub fn new_light<BE, Block: BlockT, Client, F: Fetcher<Block>>(
client: Arc<Client>,
subscriptions: Subscriptions,
remote_blockchain: Arc<dyn RemoteBlockchain<Block>>,
fetcher: Arc<F>,
) -> State<B, E, Block, RA>
) -> State<Block, Client>
where
Block: BlockT + 'static,
B: sc_client_api::backend::Backend<Block> + Send + Sync + 'static,
E: CallExecutor<Block> + Send + Sync + 'static + Clone,
RA: Send + Sync + 'static,
BE: Backend<Block> + 'static,
Client: ExecutorProvider<Block> + StorageProvider<Block, BE>
+ HeaderMetadata<Block, Error = sp_blockchain::Error>
+ ProvideRuntimeApi<Block> + HeaderBackend<Block> + BlockchainEvents<Block>
+ Send + Sync + 'static,
F: Send + Sync + 'static,
{
State {
@@ -237,16 +239,14 @@ pub fn new_light<B, E, Block: BlockT, RA, F: Fetcher<Block>>(
}
/// State API with subscriptions support.
pub struct State<B, E, Block, RA> {
backend: Box<dyn StateBackend<B, E, Block, RA>>,
pub struct State<Block, Client> {
backend: Box<dyn StateBackend<Block, Client>>,
}
impl<B, E, Block, RA> StateApi<Block::Hash> for State<B, E, Block, RA>
impl<Block, Client> StateApi<Block::Hash> for State<Block, Client>
where
Block: BlockT + 'static,
B: sc_client_api::backend::Backend<Block> + Send + Sync + 'static,
E: CallExecutor<Block> + Send + Sync + 'static + Clone,
RA: Send + Sync + 'static,
Client: Send + Sync + 'static,
{
type Metadata = crate::metadata::Metadata;
+24 -24
View File
@@ -26,12 +26,8 @@ use rpc::{Result as RpcResult, futures::{stream, Future, Sink, Stream, future::r
use sc_rpc_api::Subscriptions;
use sc_client_api::backend::Backend;
use sp_blockchain::{
Result as ClientResult, Error as ClientError, HeaderMetadata, CachedHeaderMetadata
};
use sc_client::{
Client, CallExecutor, BlockchainEvents
};
use sp_blockchain::{Result as ClientResult, Error as ClientError, HeaderMetadata, CachedHeaderMetadata, HeaderBackend};
use sc_client::BlockchainEvents;
use sp_core::{
Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, ChildInfo},
};
@@ -40,9 +36,11 @@ use sp_runtime::{
generic::BlockId, traits::{Block as BlockT, NumberFor, SaturatedConversion},
};
use sp_api::{Metadata, ProvideRuntimeApi};
use sp_api::{Metadata, ProvideRuntimeApi, CallApiAt};
use super::{StateBackend, error::{FutureResult, Error, Result}, client_err, child_resolution_error};
use std::marker::PhantomData;
use sc_client_api::{CallExecutor, StorageProvider, ExecutorProvider};
/// Ranges to query in state_queryStorage.
struct QueryStorageRange<Block: BlockT> {
@@ -59,25 +57,27 @@ struct QueryStorageRange<Block: BlockT> {
}
/// State API backend for full nodes.
pub struct FullState<B, E, Block: BlockT, RA> {
client: Arc<Client<B, E, Block, RA>>,
pub struct FullState<BE, Block: BlockT, Client> {
client: Arc<Client>,
subscriptions: Subscriptions,
_phantom: PhantomData<(BE, Block)>
}
impl<B, E, Block: BlockT, RA> FullState<B, E, Block, RA>
impl<BE, Block: BlockT, Client> FullState<BE, Block, Client>
where
BE: Backend<Block>,
Client: StorageProvider<Block, BE> + HeaderBackend<Block>
+ HeaderMetadata<Block, Error = sp_blockchain::Error>,
Block: BlockT + 'static,
B: Backend<Block> + Send + Sync + 'static,
E: CallExecutor<Block> + Send + Sync + 'static + Clone,
{
/// Create new state API backend for full nodes.
pub fn new(client: Arc<Client<B, E, Block, RA>>, subscriptions: Subscriptions) -> Self {
Self { client, subscriptions }
pub fn new(client: Arc<Client>, subscriptions: Subscriptions) -> Self {
Self { client, subscriptions, _phantom: PhantomData }
}
/// Returns given block hash or best block hash if None is passed.
fn block_or_best(&self, hash: Option<Block::Hash>) -> ClientResult<Block::Hash> {
Ok(hash.unwrap_or_else(|| self.client.chain_info().best_hash))
Ok(hash.unwrap_or_else(|| self.client.info().best_hash))
}
/// Splits the `query_storage` block range into 'filtered' and 'unfiltered' subranges.
@@ -212,14 +212,14 @@ impl<B, E, Block: BlockT, RA> FullState<B, E, Block, RA>
}
}
impl<B, E, Block, RA> StateBackend<B, E, Block, RA> for FullState<B, E, Block, RA> where
impl<BE, Block, Client> StateBackend<Block, Client> for FullState<BE, Block, Client> where
Block: BlockT + 'static,
B: Backend<Block> + Send + Sync + 'static,
E: CallExecutor<Block> + Send + Sync + 'static + Clone,
RA: Send + Sync + 'static,
Client<B, E, Block, RA>: ProvideRuntimeApi<Block>,
<Client<B, E, Block, RA> as ProvideRuntimeApi<Block>>::Api:
Metadata<Block, Error = sp_blockchain::Error>,
BE: Backend<Block> + 'static,
Client: ExecutorProvider<Block> + StorageProvider<Block, BE> + HeaderBackend<Block>
+ HeaderMetadata<Block, Error = sp_blockchain::Error> + BlockchainEvents<Block>
+ CallApiAt<Block, Error = sp_blockchain::Error> + ProvideRuntimeApi<Block>
+ Send + Sync + 'static,
Client::Api: Metadata<Block, Error = sp_blockchain::Error>,
{
fn call(
&self,
@@ -424,7 +424,7 @@ impl<B, E, Block, RA> StateBackend<B, E, Block, RA> for FullState<B, E, Block, R
let stream = stream
.filter_map(move |_| {
let info = client.chain_info();
let info = client.info();
let version = client
.runtime_version_at(&BlockId::hash(info.best_hash))
.map_err(client_err)
@@ -478,7 +478,7 @@ impl<B, E, Block, RA> StateBackend<B, E, Block, RA> for FullState<B, E, Block, R
// initial values
let initial = stream::iter_result(keys
.map(|keys| {
let block = self.client.chain_info().best_hash;
let block = self.client.info().best_hash;
let changes = keys
.into_iter()
.map(|key| self.storage(Some(block.clone()).into(), key.clone())
+10 -15
View File
@@ -39,10 +39,9 @@ use rpc::{
};
use sc_rpc_api::Subscriptions;
use sc_client_api::backend::Backend;
use sp_blockchain::Error as ClientError;
use sp_blockchain::{Error as ClientError, HeaderBackend};
use sc_client::{
BlockchainEvents, Client, CallExecutor,
BlockchainEvents,
light::{
blockchain::{future_header, RemoteBlockchain},
fetcher::{Fetcher, RemoteCallRequest, RemoteReadRequest, RemoteReadChildRequest},
@@ -60,8 +59,8 @@ use super::{StateBackend, error::{FutureResult, Error}, client_err};
type StorageMap = HashMap<StorageKey, Option<StorageData>>;
/// State API backend for light nodes.
pub struct LightState<Block: BlockT, F: Fetcher<Block>, B, E, RA> {
client: Arc<Client<B, E, Block, RA>>,
pub struct LightState<Block: BlockT, F: Fetcher<Block>, Client> {
client: Arc<Client>,
subscriptions: Subscriptions,
version_subscriptions: SimpleSubscriptions<Block::Hash, RuntimeVersion>,
storage_subscriptions: Arc<Mutex<StorageSubscriptions<Block>>>,
@@ -134,16 +133,14 @@ impl<Hash, V> SharedRequests<Hash, V> for SimpleSubscriptions<Hash, V> where
}
}
impl<Block: BlockT, F: Fetcher<Block> + 'static, B, E, RA> LightState<Block, F, B, E, RA>
impl<Block: BlockT, F: Fetcher<Block> + 'static, Client> LightState<Block, F, Client>
where
Block: BlockT,
B: Backend<Block> + Send + Sync + 'static,
E: CallExecutor<Block> + Send + Sync + 'static + Clone,
RA: Send + Sync + 'static,
Client: HeaderBackend<Block> + Send + Sync + 'static,
{
/// Create new state API backend for light nodes.
pub fn new(
client: Arc<Client<B, E, Block, RA>>,
client: Arc<Client>,
subscriptions: Subscriptions,
remote_blockchain: Arc<dyn RemoteBlockchain<Block>>,
fetcher: Arc<F>,
@@ -164,16 +161,14 @@ impl<Block: BlockT, F: Fetcher<Block> + 'static, B, E, RA> LightState<Block, F,
/// Returns given block hash or best block hash if None is passed.
fn block_or_best(&self, hash: Option<Block::Hash>) -> Block::Hash {
hash.unwrap_or_else(|| self.client.chain_info().best_hash)
hash.unwrap_or_else(|| self.client.info().best_hash)
}
}
impl<Block, F, B, E, RA> StateBackend<B, E, Block, RA> for LightState<Block, F, B, E, RA>
impl<Block, F, Client> StateBackend<Block, Client> for LightState<Block, F, Client>
where
Block: BlockT,
B: Backend<Block> + Send + Sync + 'static,
E: CallExecutor<Block> + Send + Sync + 'static + Clone,
RA: Send + Sync + 'static,
Client: BlockchainEvents<Block> + HeaderBackend<Block> + Send + Sync + 'static,
F: Fetcher<Block> + 'static
{
fn call(
+4 -1
View File
@@ -23,6 +23,7 @@ use sc_client_api::{
BlockchainEvents,
backend::RemoteBackend, light::RemoteBlockchain,
execution_extensions::ExtensionsFactory,
ExecutorProvider, CallExecutor
};
use sc_client::Client;
use sc_chain_spec::{RuntimeGenesis, Extension};
@@ -799,7 +800,9 @@ ServiceBuilder<
TBackend::OffchainStorage,
TBl
>,
>, Error> {
>, Error>
where TExec: CallExecutor<TBl, Backend = TBackend>,
{
let ServiceBuilder {
marker: _,
mut config,
@@ -35,6 +35,7 @@ use sp_consensus::{
use sc_executor::{NativeExecutor, NativeExecutionDispatch};
use std::{io::{Read, Write, Seek}, pin::Pin};
use sc_client_api::BlockBody;
/// Build a chain spec json
pub fn build_spec<G, E>(spec: ChainSpec<G, E>, raw: bool) -> error::Result<String> where
+268 -316
View File
@@ -66,7 +66,8 @@ pub use sc_client_api::{
backend::{
self, BlockImportOperation, PrunableStateChangesTrieStorage,
ClientImportOperation, Finalizer, ImportSummary, NewBlockState,
LockImportRun, changes_tries_state_at_block,
changes_tries_state_at_block, StorageProvider,
LockImportRun,
},
client::{
ImportNotifications, FinalityNotification, FinalityNotifications, BlockImportNotification,
@@ -75,7 +76,7 @@ pub use sc_client_api::{
},
execution_extensions::{ExecutionExtensions, ExecutionStrategies},
notifications::{StorageNotifications, StorageEventStream},
CallExecutor,
CallExecutor, ExecutorProvider, ProofProvider,
};
use sp_blockchain::Error;
use prometheus_endpoint::Registry;
@@ -85,6 +86,7 @@ use crate::{
light::{call_executor::prove_execution, fetcher::ChangesProof},
in_mem, genesis, cht, block_rules::{BlockRules, LookupResult as BlockLookupResult},
};
use crate::client::backend::KeyIterator;
/// Substrate Client
pub struct Client<B, E, Block, RA> where Block: BlockT {
@@ -100,46 +102,6 @@ pub struct Client<B, E, Block, RA> where Block: BlockT {
_phantom: PhantomData<RA>,
}
/// An `Iterator` that iterates keys in a given block under a prefix.
pub struct KeyIterator<'a, State, Block> {
state: State,
prefix: Option<&'a StorageKey>,
current_key: Vec<u8>,
_phantom: PhantomData<Block>,
}
impl <'a, State, Block> KeyIterator<'a, State, Block> {
fn new(state: State, prefix: Option<&'a StorageKey>, current_key: Vec<u8>) -> Self {
Self {
state,
prefix,
current_key,
_phantom: PhantomData,
}
}
}
impl<'a, State, Block> Iterator for KeyIterator<'a, State, Block> where
Block: BlockT,
State: StateBackend<HashFor<Block>>,
{
type Item = StorageKey;
fn next(&mut self) -> Option<Self::Item> {
let next_key = self.state
.next_storage_key(&self.current_key)
.ok()
.flatten()?;
if let Some(prefix) = self.prefix {
if !next_key.starts_with(&prefix.0[..]) {
return None;
}
}
self.current_key = next_key.clone();
Some(StorageKey(next_key))
}
}
// used in importing a block, where additional changes are made after the runtime
// executed.
enum PrePostHeader<H> {
@@ -324,119 +286,14 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
})
}
/// Get a reference to the execution extensions.
pub fn execution_extensions(&self) -> &ExecutionExtensions<Block> {
&self.execution_extensions
}
/// Get a reference to the state at a given block.
pub fn state_at(&self, block: &BlockId<Block>) -> sp_blockchain::Result<B::State> {
self.backend.state_at(*block)
}
/// Given a `BlockId` and a key prefix, return the matching storage keys in that block.
pub fn storage_keys(&self, id: &BlockId<Block>, key_prefix: &StorageKey) -> sp_blockchain::Result<Vec<StorageKey>> {
let keys = self.state_at(id)?.keys(&key_prefix.0).into_iter().map(StorageKey).collect();
Ok(keys)
}
/// Given a `BlockId` and a key prefix, return the matching child storage keys and values in that block.
pub fn storage_pairs(&self, id: &BlockId<Block>, key_prefix: &StorageKey)
-> sp_blockchain::Result<Vec<(StorageKey, StorageData)>>
{
let state = self.state_at(id)?;
let keys = state
.keys(&key_prefix.0)
.into_iter()
.map(|k| {
let d = state.storage(&k).ok().flatten().unwrap_or_default();
(StorageKey(k), StorageData(d))
})
.collect();
Ok(keys)
}
/// Given a `BlockId` and a key prefix, return a `KeyIterator` iterates matching storage keys in that block.
pub fn storage_keys_iter<'a>(
&self,
id: &BlockId<Block>,
prefix: Option<&'a StorageKey>,
start_key: Option<&StorageKey>
) -> sp_blockchain::Result<KeyIterator<'a, B::State, Block>> {
let state = self.state_at(id)?;
let start_key = start_key
.or(prefix)
.map(|key| key.0.clone())
.unwrap_or_else(Vec::new);
Ok(KeyIterator::new(state, prefix, start_key))
}
/// Given a `BlockId` and a key, return the value under the key in that block.
pub fn storage(&self, id: &BlockId<Block>, key: &StorageKey)
-> sp_blockchain::Result<Option<StorageData>>
{
Ok(self.state_at(id)?
.storage(&key.0).map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
.map(StorageData)
)
}
/// Given a `BlockId` and a key, return the value under the hash in that block.
pub fn storage_hash(&self, id: &BlockId<Block>, key: &StorageKey)
-> sp_blockchain::Result<Option<Block::Hash>>
{
Ok(self.state_at(id)?
.storage_hash(&key.0).map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
)
}
/// Given a `BlockId`, a key prefix, and a child storage key, return the matching child storage keys.
pub fn child_storage_keys(
&self,
id: &BlockId<Block>,
child_storage_key: &StorageKey,
child_info: ChildInfo,
key_prefix: &StorageKey
) -> sp_blockchain::Result<Vec<StorageKey>> {
let keys = self.state_at(id)?
.child_keys(&child_storage_key.0, child_info, &key_prefix.0)
.into_iter()
.map(StorageKey)
.collect();
Ok(keys)
}
/// Given a `BlockId`, a key and a child storage key, return the value under the key in that block.
pub fn child_storage(
&self,
id: &BlockId<Block>,
storage_key: &StorageKey,
child_info: ChildInfo,
key: &StorageKey
) -> sp_blockchain::Result<Option<StorageData>> {
Ok(self.state_at(id)?
.child_storage(&storage_key.0, child_info, &key.0)
.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
.map(StorageData))
}
/// Given a `BlockId`, a key and a child storage key, return the hash under the key in that block.
pub fn child_storage_hash(
&self,
id: &BlockId<Block>,
storage_key: &StorageKey,
child_info: ChildInfo,
key: &StorageKey
) -> sp_blockchain::Result<Option<Block::Hash>> {
Ok(self.state_at(id)?
.child_storage_hash(&storage_key.0, child_info, &key.0)
.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
)
}
/// Get the code at a given block.
pub fn code_at(&self, id: &BlockId<Block>) -> sp_blockchain::Result<Vec<u8>> {
Ok(self.storage(id, &StorageKey(well_known_keys::CODE.to_vec()))?
Ok(StorageProvider::storage(self, id, &StorageKey(well_known_keys::CODE.to_vec()))?
.expect("None is returned if there's no value stored for the given key;\
':code' key is always defined; qed").0)
}
@@ -446,57 +303,6 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
self.executor.runtime_version(id)
}
/// Get call executor reference.
pub fn executor(&self) -> &E {
&self.executor
}
/// Reads storage value at a given block + key, returning read proof.
pub fn read_proof<I>(&self, id: &BlockId<Block>, keys: I) -> sp_blockchain::Result<StorageProof> where
I: IntoIterator,
I::Item: AsRef<[u8]>,
{
self.state_at(id)
.and_then(|state| prove_read(state, keys)
.map_err(Into::into))
}
/// Reads child storage value at a given block + storage_key + key, returning
/// read proof.
pub fn read_child_proof<I>(
&self,
id: &BlockId<Block>,
storage_key: &[u8],
child_info: ChildInfo,
keys: I,
) -> sp_blockchain::Result<StorageProof> where
I: IntoIterator,
I::Item: AsRef<[u8]>,
{
self.state_at(id)
.and_then(|state| prove_child_read(state, storage_key, child_info, keys)
.map_err(Into::into))
}
/// Execute a call to a contract on top of state in a block of given hash
/// AND returning execution proof.
///
/// No changes are made.
pub fn execution_proof(&self,
id: &BlockId<Block>,
method: &str,
call_data: &[u8]
) -> sp_blockchain::Result<(Vec<u8>, StorageProof)> {
let state = self.state_at(id)?;
let header = self.prepare_environment_block(id)?;
prove_execution(state, header, &self.executor, method, call_data)
}
/// Reads given header and generates CHT-based header proof.
pub fn header_proof(&self, id: &BlockId<Block>) -> sp_blockchain::Result<(Block::Header, StorageProof)> {
self.header_proof_with_cht_size(id, cht::size())
}
/// Get block hash by number.
pub fn block_hash(&self,
block_number: <<Block as BlockT>::Header as HeaderT>::Number
@@ -531,112 +337,6 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
Ok((header, proof))
}
/// Get longest range within [first; last] that is possible to use in `key_changes`
/// and `key_changes_proof` calls.
/// Range could be shortened from the beginning if some changes tries have been pruned.
/// Returns Ok(None) if changes tries are not supported.
pub fn max_key_changes_range(
&self,
first: NumberFor<Block>,
last: BlockId<Block>,
) -> sp_blockchain::Result<Option<(NumberFor<Block>, BlockId<Block>)>> {
let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?;
let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?;
if first > last_number {
return Err(sp_blockchain::Error::ChangesTrieAccessFailed("Invalid changes trie range".into()));
}
let (storage, configs) = match self.require_changes_trie(first, last_hash, false).ok() {
Some((storage, configs)) => (storage, configs),
None => return Ok(None),
};
let first_available_changes_trie = configs.last().map(|config| config.0);
match first_available_changes_trie {
Some(first_available_changes_trie) => {
let oldest_unpruned = storage.oldest_pruned_digest_range_end();
let first = std::cmp::max(first_available_changes_trie, oldest_unpruned);
Ok(Some((first, last)))
},
None => Ok(None)
}
}
/// Get pairs of (block, extrinsic) where key has been changed at given blocks range.
/// Works only for runtimes that are supporting changes tries.
///
/// Changes are returned in descending order (i.e. last block comes first).
pub fn key_changes(
&self,
first: NumberFor<Block>,
last: BlockId<Block>,
storage_key: Option<&StorageKey>,
key: &StorageKey
) -> sp_blockchain::Result<Vec<(NumberFor<Block>, u32)>> {
let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?;
let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?;
let (storage, configs) = self.require_changes_trie(first, last_hash, true)?;
let mut result = Vec::new();
let best_number = self.backend.blockchain().info().best_number;
for (config_zero, config_end, config) in configs {
let range_first = ::std::cmp::max(first, config_zero + One::one());
let range_anchor = match config_end {
Some((config_end_number, config_end_hash)) => if last_number > config_end_number {
ChangesTrieAnchorBlockId { hash: config_end_hash, number: config_end_number }
} else {
ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), number: last_number }
},
None => ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), number: last_number },
};
let config_range = ChangesTrieConfigurationRange {
config: &config,
zero: config_zero.clone(),
end: config_end.map(|(config_end_number, _)| config_end_number),
};
let result_range: Vec<(NumberFor<Block>, u32)> = key_changes::<HashFor<Block>, _>(
config_range,
storage.storage(),
range_first,
&range_anchor,
best_number,
storage_key.as_ref().map(|x| &x.0[..]),
&key.0)
.and_then(|r| r.map(|r| r.map(|(block, tx)| (block, tx))).collect::<Result<_, _>>())
.map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?;
result.extend(result_range);
}
Ok(result)
}
/// Get proof for computation of (block, extrinsic) pairs where key has been changed at given blocks range.
/// `min` is the hash of the first block, which changes trie root is known to the requester - when we're using
/// changes tries from ascendants of this block, we should provide proofs for changes tries roots
/// `max` is the hash of the last block known to the requester - we can't use changes tries from descendants
/// of this block.
/// Works only for runtimes that are supporting changes tries.
pub fn key_changes_proof(
&self,
first: Block::Hash,
last: Block::Hash,
min: Block::Hash,
max: Block::Hash,
storage_key: Option<&StorageKey>,
key: &StorageKey,
) -> sp_blockchain::Result<ChangesProof<Block::Header>> {
self.key_changes_proof_with_cht_size(
first,
last,
min,
max,
storage_key,
key,
cht::size(),
)
}
/// Does the same work as `key_changes_proof`, but assumes that CHTs are of passed size.
pub fn key_changes_proof_with_cht_size(
&self,
@@ -1344,17 +1044,6 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
self.backend.blockchain().justification(*id)
}
/// Get full block by id.
pub fn block(&self, id: &BlockId<Block>)
-> sp_blockchain::Result<Option<SignedBlock<Block>>>
{
Ok(match (self.header(id)?, self.body(id)?, self.justification(id)?) {
(Some(header), Some(extrinsics), justification) =>
Some(SignedBlock { block: Block::new(header, extrinsics), justification }),
_ => None,
})
}
/// Gets the uncles of the block with `target_hash` going back `max_generation` ancestors.
pub fn uncles(&self, target_hash: Block::Hash, max_generation: NumberFor<Block>) -> sp_blockchain::Result<Vec<Block::Hash>> {
let load_header = |id: Block::Hash| -> sp_blockchain::Result<Block::Header> {
@@ -1399,6 +1088,70 @@ impl<B, E, Block, RA> Client<B, E, Block, RA> where
}
}
impl<B, E, Block, RA> ProofProvider<Block> for Client<B, E, Block, RA> where
B: backend::Backend<Block>,
E: CallExecutor<Block>,
Block: BlockT,
{
fn read_proof(
&self,
id: &BlockId<Block>,
keys: &mut dyn Iterator<Item=&[u8]>,
) -> sp_blockchain::Result<StorageProof> {
self.state_at(id)
.and_then(|state| prove_read(state, keys)
.map_err(Into::into))
}
fn read_child_proof(
&self,
id: &BlockId<Block>,
storage_key: &[u8],
child_info: ChildInfo,
keys: &mut dyn Iterator<Item=&[u8]>,
) -> sp_blockchain::Result<StorageProof> {
self.state_at(id)
.and_then(|state| prove_child_read(state, storage_key, child_info, keys)
.map_err(Into::into))
}
fn execution_proof(
&self,
id: &BlockId<Block>,
method: &str,
call_data: &[u8]
) -> sp_blockchain::Result<(Vec<u8>, StorageProof)> {
let state = self.state_at(id)?;
let header = self.prepare_environment_block(id)?;
prove_execution(state, header, &self.executor, method, call_data)
}
fn header_proof(&self, id: &BlockId<Block>) -> sp_blockchain::Result<(Block::Header, StorageProof)> {
self.header_proof_with_cht_size(id, cht::size())
}
fn key_changes_proof(
&self,
first: Block::Hash,
last: Block::Hash,
min: Block::Hash,
max: Block::Hash,
storage_key: Option<&StorageKey>,
key: &StorageKey,
) -> sp_blockchain::Result<ChangesProof<Block::Header>> {
self.key_changes_proof_with_cht_size(
first,
last,
min,
max,
storage_key,
key,
cht::size(),
)
}
}
impl<B, E, Block, RA> BlockBuilderProvider<B, Block, Self> for Client<B, E, Block, RA>
where
B: backend::Backend<Block> + Send + Sync + 'static,
@@ -1425,6 +1178,196 @@ impl<B, E, Block, RA> BlockBuilderProvider<B, Block, Self> for Client<B, E, Bloc
}
}
impl<B, E, Block, RA> ExecutorProvider<Block> for Client<B, E, Block, RA> where
B: backend::Backend<Block>,
E: CallExecutor<Block>,
Block: BlockT,
{
type Executor = E;
fn executor(&self) -> &Self::Executor {
&self.executor
}
fn execution_extensions(&self) -> &ExecutionExtensions<Block> {
&self.execution_extensions
}
}
impl<B, E, Block, RA> StorageProvider<Block, B> for Client<B, E, Block, RA> where
B: backend::Backend<Block>,
E: CallExecutor<Block>,
Block: BlockT,
{
fn storage_keys(&self, id: &BlockId<Block>, key_prefix: &StorageKey) -> sp_blockchain::Result<Vec<StorageKey>> {
let keys = self.state_at(id)?.keys(&key_prefix.0).into_iter().map(StorageKey).collect();
Ok(keys)
}
fn storage_pairs(&self, id: &BlockId<Block>, key_prefix: &StorageKey)
-> sp_blockchain::Result<Vec<(StorageKey, StorageData)>>
{
let state = self.state_at(id)?;
let keys = state
.keys(&key_prefix.0)
.into_iter()
.map(|k| {
let d = state.storage(&k).ok().flatten().unwrap_or_default();
(StorageKey(k), StorageData(d))
})
.collect();
Ok(keys)
}
fn storage_keys_iter<'a>(
&self,
id: &BlockId<Block>,
prefix: Option<&'a StorageKey>,
start_key: Option<&StorageKey>
) -> sp_blockchain::Result<KeyIterator<'a, B::State, Block>> {
let state = self.state_at(id)?;
let start_key = start_key
.or(prefix)
.map(|key| key.0.clone())
.unwrap_or_else(Vec::new);
Ok(KeyIterator::new(state, prefix, start_key))
}
fn storage(&self, id: &BlockId<Block>, key: &StorageKey) -> sp_blockchain::Result<Option<StorageData>>
{
Ok(self.state_at(id)?
.storage(&key.0).map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
.map(StorageData)
)
}
fn storage_hash(&self, id: &BlockId<Block>, key: &StorageKey) -> sp_blockchain::Result<Option<Block::Hash>>
{
Ok(self.state_at(id)?
.storage_hash(&key.0).map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
)
}
fn child_storage_keys(
&self,
id: &BlockId<Block>,
child_storage_key: &StorageKey,
child_info: ChildInfo,
key_prefix: &StorageKey
) -> sp_blockchain::Result<Vec<StorageKey>> {
let keys = self.state_at(id)?
.child_keys(&child_storage_key.0, child_info, &key_prefix.0)
.into_iter()
.map(StorageKey)
.collect();
Ok(keys)
}
fn child_storage(
&self,
id: &BlockId<Block>,
storage_key: &StorageKey,
child_info: ChildInfo,
key: &StorageKey
) -> sp_blockchain::Result<Option<StorageData>> {
Ok(self.state_at(id)?
.child_storage(&storage_key.0, child_info, &key.0)
.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
.map(StorageData))
}
fn child_storage_hash(
&self,
id: &BlockId<Block>,
storage_key: &StorageKey,
child_info: ChildInfo,
key: &StorageKey
) -> sp_blockchain::Result<Option<Block::Hash>> {
Ok(self.state_at(id)?
.child_storage_hash(&storage_key.0, child_info, &key.0)
.map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?
)
}
fn max_key_changes_range(
&self,
first: NumberFor<Block>,
last: BlockId<Block>,
) -> sp_blockchain::Result<Option<(NumberFor<Block>, BlockId<Block>)>> {
let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?;
let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?;
if first > last_number {
return Err(sp_blockchain::Error::ChangesTrieAccessFailed("Invalid changes trie range".into()));
}
let (storage, configs) = match self.require_changes_trie(first, last_hash, false).ok() {
Some((storage, configs)) => (storage, configs),
None => return Ok(None),
};
let first_available_changes_trie = configs.last().map(|config| config.0);
match first_available_changes_trie {
Some(first_available_changes_trie) => {
let oldest_unpruned = storage.oldest_pruned_digest_range_end();
let first = std::cmp::max(first_available_changes_trie, oldest_unpruned);
Ok(Some((first, last)))
},
None => Ok(None)
}
}
fn key_changes(
&self,
first: NumberFor<Block>,
last: BlockId<Block>,
storage_key: Option<&StorageKey>,
key: &StorageKey
) -> sp_blockchain::Result<Vec<(NumberFor<Block>, u32)>> {
let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?;
let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?;
let (storage, configs) = self.require_changes_trie(first, last_hash, true)?;
let mut result = Vec::new();
let best_number = self.backend.blockchain().info().best_number;
for (config_zero, config_end, config) in configs {
let range_first = ::std::cmp::max(first, config_zero + One::one());
let range_anchor = match config_end {
Some((config_end_number, config_end_hash)) => if last_number > config_end_number {
ChangesTrieAnchorBlockId { hash: config_end_hash, number: config_end_number }
} else {
ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), number: last_number }
},
None => ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), number: last_number },
};
let config_range = ChangesTrieConfigurationRange {
config: &config,
zero: config_zero.clone(),
end: config_end.map(|(config_end_number, _)| config_end_number),
};
let result_range: Vec<(NumberFor<Block>, u32)> = key_changes::<HashFor<Block>, _>(
config_range,
storage.storage(),
range_first,
&range_anchor,
best_number,
storage_key.as_ref().map(|x| &x.0[..]),
&key.0)
.and_then(|r| r.map(|r| r.map(|(block, tx)| (block, tx))).collect::<Result<_, _>>())
.map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?;
result.extend(result_range);
}
Ok(result)
}
}
impl<B, E, Block, RA> HeaderMetadata<Block> for Client<B, E, Block, RA> where
B: backend::Backend<Block>,
E: CallExecutor<Block>,
@@ -1902,6 +1845,15 @@ impl<B, E, Block, RA> BlockBody<Block> for Client<B, E, Block, RA>
) -> sp_blockchain::Result<Option<Vec<<Block as BlockT>::Extrinsic>>> {
self.body(id)
}
fn block(&self, id: &BlockId<Block>) -> sp_blockchain::Result<Option<SignedBlock<Block>>>
{
Ok(match (self.header(id)?, self.body(id)?, self.justification(id)?) {
(Some(header), Some(extrinsics), justification) =>
Some(SignedBlock { block: Block::new(header, extrinsics), justification }),
_ => None,
})
}
}
impl<B, E, Block, RA> backend::AuxStore for Client<B, E, Block, RA>
+2 -1
View File
@@ -40,7 +40,7 @@ use sp_blockchain::{Error as ClientError, Result as ClientResult};
use sc_client_api::{
backend::RemoteBackend,
light::RemoteCallRequest,
call_executor::CallExecutor
call_executor::CallExecutor,
};
use sc_executor::{RuntimeVersion, NativeVersion};
@@ -288,6 +288,7 @@ mod tests {
use sp_core::H256;
use sc_client_api::backend::{Backend, NewBlockState};
use crate::in_mem::Backend as InMemBackend;
use sc_client_api::ProofProvider;
use sp_runtime::traits::BlakeTwo256;
struct DummyCallExecutor;
+3 -2
View File
@@ -349,6 +349,7 @@ pub mod tests {
use sp_runtime::{generic::BlockId, traits::BlakeTwo256};
use sp_state_machine::Backend;
use super::*;
use sc_client_api::{StorageProvider, ProofProvider};
const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1");
@@ -378,7 +379,7 @@ pub mod tests {
.and_then(|v| Decode::decode(&mut &v.0[..]).ok()).unwrap();
let remote_read_proof = remote_client.read_proof(
&remote_block_id,
&[well_known_keys::HEAP_PAGES],
&mut std::iter::once(well_known_keys::HEAP_PAGES),
).unwrap();
// check remote read proof locally
@@ -426,7 +427,7 @@ pub mod tests {
&remote_block_id,
b":child_storage:default:child1",
CHILD_INFO_1,
&[b"key1"],
&mut std::iter::once("key1".as_bytes()),
).unwrap();
// check locally