Make chain and state RPCs async (#3480)

* chain+state RPCs are async now

* wrapped too long lines

* create full/light RPC impls from service

* use ordering

* post-merge fix
This commit is contained in:
Svyatoslav Nikolsky
2019-09-01 03:20:10 +03:00
committed by Gavin Wood
parent 816e132cd7
commit 607ee0a4e4
26 changed files with 1859 additions and 721 deletions
@@ -0,0 +1,79 @@
// Copyright 2019 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! Blockchain API backend for full nodes.
use std::sync::Arc;
use rpc::futures::future::result;
use api::Subscriptions;
use client::{backend::Backend, CallExecutor, Client};
use primitives::{H256, Blake2Hasher};
use sr_primitives::{
generic::{BlockId, SignedBlock},
traits::{Block as BlockT},
};
use super::{ChainBackend, client_err, error::FutureResult};
/// Blockchain API backend for full nodes. Reads all the data from local database.
pub struct FullChain<B, E, Block: BlockT, RA> {
    /// Substrate client used to answer every query locally.
    client: Arc<Client<B, E, Block, RA>>,
    /// Current subscriptions (new-head / finalized-head subscribers).
    subscriptions: Subscriptions,
}
impl<B, E, Block: BlockT, RA> FullChain<B, E, Block, RA> {
/// Create new Chain API RPC handler.
pub fn new(client: Arc<Client<B, E, Block, RA>>, subscriptions: Subscriptions) -> Self {
Self {
client,
subscriptions,
}
}
}
impl<B, E, Block, RA> ChainBackend<B, E, Block, RA> for FullChain<B, E, Block, RA> where
    Block: BlockT<Hash=H256> + 'static,
    B: Backend<Block, Blake2Hasher> + Send + Sync + 'static,
    E: CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static,
    RA: Send + Sync + 'static,
{
    fn client(&self) -> &Arc<Client<B, E, Block, RA>> {
        &self.client
    }

    fn subscriptions(&self) -> &Subscriptions {
        &self.subscriptions
    }

    /// Read the header straight out of the local database; the lookup is
    /// synchronous, so the result is wrapped into an already-resolved future.
    fn header(&self, hash: Option<Block::Hash>) -> FutureResult<Option<Block::Header>> {
        let at = BlockId::Hash(self.unwrap_or_best(hash));
        let header = self.client.header(&at).map_err(client_err);
        Box::new(result(header))
    }

    /// Read the full signed block from the local database, likewise returned
    /// as an immediately-resolved future.
    fn block(&self, hash: Option<Block::Hash>)
        -> FutureResult<Option<SignedBlock<Block>>>
    {
        let at = BlockId::Hash(self.unwrap_or_best(hash));
        let block = self.client.block(&at).map_err(client_err);
        Box::new(result(block))
    }
}
+123
View File
@@ -0,0 +1,123 @@
// Copyright 2019 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! Blockchain API backend for light nodes.
use std::sync::Arc;
use futures03::{future::ready, FutureExt, TryFutureExt};
use rpc::futures::future::{result, Future, Either};
use api::Subscriptions;
use client::{
self, Client,
light::{
fetcher::{Fetcher, RemoteBodyRequest},
blockchain::RemoteBlockchain,
},
};
use primitives::{H256, Blake2Hasher};
use sr_primitives::{
generic::{BlockId, SignedBlock},
traits::{Block as BlockT},
};
use super::{ChainBackend, client_err, error::FutureResult};
/// Blockchain API backend for light nodes. Reads all the data from local
/// database, if available, or fetches it from remote node otherwise.
pub struct LightChain<B, E, Block: BlockT, RA, F> {
    /// Substrate client.
    client: Arc<Client<B, E, Block, RA>>,
    /// Current subscriptions.
    subscriptions: Subscriptions,
    /// Remote blockchain reference, used to look up headers that are not in
    /// the local database.
    remote_blockchain: Arc<dyn RemoteBlockchain<Block>>,
    /// Remote fetcher reference, used to request block bodies from full nodes.
    fetcher: Arc<F>,
}
impl<B, E, Block: BlockT, RA, F: Fetcher<Block>> LightChain<B, E, Block, RA, F> {
/// Create new Chain API RPC handler.
pub fn new(
client: Arc<Client<B, E, Block, RA>>,
subscriptions: Subscriptions,
remote_blockchain: Arc<dyn RemoteBlockchain<Block>>,
fetcher: Arc<F>,
) -> Self {
Self {
client,
subscriptions,
remote_blockchain,
fetcher,
}
}
}
impl<B, E, Block, RA, F> ChainBackend<B, E, Block, RA> for LightChain<B, E, Block, RA, F> where
    Block: BlockT<Hash=H256> + 'static,
    B: client::backend::Backend<Block, Blake2Hasher> + Send + Sync + 'static,
    E: client::CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static,
    RA: Send + Sync + 'static,
    F: Fetcher<Block> + Send + Sync + 'static,
{
    fn client(&self) -> &Arc<Client<B, E, Block, RA>> {
        &self.client
    }

    fn subscriptions(&self) -> &Subscriptions {
        &self.subscriptions
    }

    fn header(&self, hash: Option<Block::Hash>) -> FutureResult<Option<Block::Header>> {
        let hash = self.unwrap_or_best(hash);
        let fetcher = self.fetcher.clone();
        // Resolve the header from the local database if present, otherwise
        // issue a remote request through the fetcher.
        let maybe_header = client::light::blockchain::future_header(
            &*self.remote_blockchain,
            &*fetcher,
            BlockId::Hash(hash),
        );

        // `future_header` yields a futures 0.3 future; map the error into an
        // RPC error and bridge back to the futures 0.1 `FutureResult` with
        // `boxed().compat()`.
        Box::new(maybe_header.then(move |result|
            ready(result.map_err(client_err)),
        ).boxed().compat())
    }

    fn block(&self, hash: Option<Block::Hash>)
        -> FutureResult<Option<SignedBlock<Block>>>
    {
        let fetcher = self.fetcher.clone();
        let block = self.header(hash)
            .and_then(move |header| match header {
                // Header known: fetch the matching body remotely and
                // reassemble the full `SignedBlock`.
                Some(header) => Either::A(fetcher
                    .remote_body(RemoteBodyRequest {
                        header: header.clone(),
                        retry_count: Default::default(),
                    })
                    .boxed()
                    .compat()
                    .map(move |body| Some(SignedBlock {
                        block: Block::new(header, body),
                        // NOTE(review): justifications are not fetched on
                        // light clients, so this is always `None` here.
                        justification: None,
                    }))
                    .map_err(client_err)
                ),
                // Unknown block hash: resolve to `None` rather than an error.
                None => Either::B(result(Ok(None))),
            });

        Box::new(block)
    }
}
+210 -101
View File
@@ -16,95 +16,181 @@
//! Substrate blockchain API.
mod chain_full;
mod chain_light;
#[cfg(test)]
mod tests;
use std::sync::Arc;
use futures03::{future, StreamExt as _, TryStreamExt as _};
use client::{self, Client, BlockchainEvents};
use rpc::Result as RpcResult;
use rpc::futures::{stream, Future, Sink, Stream};
use api::Subscriptions;
use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId};
use log::warn;
use rpc::{
Result as RpcResult,
futures::{stream, Future, Sink, Stream},
};
use api::Subscriptions;
use client::{
self, Client, BlockchainEvents,
light::{fetcher::Fetcher, blockchain::RemoteBlockchain},
};
use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId};
use primitives::{H256, Blake2Hasher};
use sr_primitives::generic::{BlockId, SignedBlock};
use sr_primitives::traits::{Block as BlockT, Header, NumberFor};
use self::error::{Error, Result};
use sr_primitives::{
generic::{BlockId, SignedBlock},
traits::{Block as BlockT, Header, NumberFor},
};
use self::error::{Result, Error, FutureResult};
pub use api::chain::*;
/// Chain API with subscriptions support.
pub struct Chain<B, E, Block: BlockT, RA> {
/// Substrate client.
client: Arc<Client<B, E, Block, RA>>,
/// Current subscriptions.
subscriptions: Subscriptions,
}
/// Blockchain backend API
trait ChainBackend<B, E, Block: BlockT, RA>: Send + Sync + 'static
where
Block: BlockT<Hash=H256> + 'static,
B: client::backend::Backend<Block, Blake2Hasher> + Send + Sync + 'static,
E: client::CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static,
{
/// Get client reference.
fn client(&self) -> &Arc<Client<B, E, Block, RA>>;
impl<B, E, Block: BlockT, RA> Chain<B, E, Block, RA> {
/// Create new Chain API RPC handler.
pub fn new(client: Arc<Client<B, E, Block, RA>>, subscriptions: Subscriptions) -> Self {
Self {
client,
subscriptions,
/// Get subscriptions reference.
fn subscriptions(&self) -> &Subscriptions;
/// Tries to unwrap passed block hash, or uses best block hash otherwise.
fn unwrap_or_best(&self, hash: Option<Block::Hash>) -> Block::Hash {
match hash.into() {
None => self.client().info().chain.best_hash,
Some(hash) => hash,
}
}
}
impl<B, E, Block, RA> Chain<B, E, Block, RA> where
Block: BlockT<Hash=H256> + 'static,
B: client::backend::Backend<Block, Blake2Hasher> + Send + Sync + 'static,
E: client::CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static,
RA: Send + Sync + 'static
{
fn unwrap_or_best(&self, hash: Option<Block::Hash>) -> Result<Block::Hash> {
Ok(match hash.into() {
None => self.client.info().chain.best_hash,
Some(hash) => hash,
/// Get header of a relay chain block.
fn header(&self, hash: Option<Block::Hash>) -> FutureResult<Option<Block::Header>>;
/// Get header and body of a relay chain block.
fn block(&self, hash: Option<Block::Hash>) -> FutureResult<Option<SignedBlock<Block>>>;
/// Get hash of the n-th block in the canon chain.
///
/// By default returns latest block hash.
fn block_hash(
&self,
number: Option<number::NumberOrHex<NumberFor<Block>>>,
) -> Result<Option<Block::Hash>> {
Ok(match number {
None => Some(self.client().info().chain.best_hash),
Some(num_or_hex) => self.client()
.header(&BlockId::number(num_or_hex.to_number()?))
.map_err(client_err)?
.map(|h| h.hash()),
})
}
fn subscribe_headers<F, G, S, ERR>(
/// Get hash of the last finalized block in the canon chain.
fn finalized_head(&self) -> Result<Block::Hash> {
Ok(self.client().info().chain.finalized_hash)
}
/// New head subscription
fn subscribe_new_heads(
&self,
_metadata: crate::metadata::Metadata,
subscriber: Subscriber<Block::Header>,
best_block_hash: G,
stream: F,
) where
F: FnOnce() -> S,
G: FnOnce() -> Result<Option<Block::Hash>>,
ERR: ::std::fmt::Debug,
S: Stream<Item=Block::Header, Error=ERR> + Send + 'static,
{
self.subscriptions.add(subscriber, |sink| {
// send current head right at the start.
let header = best_block_hash()
.and_then(|hash| self.header(hash.into()))
.and_then(|header| {
header.ok_or_else(|| "Best header missing.".to_owned().into())
})
.map_err(Into::into);
) {
subscribe_headers(
self.client(),
self.subscriptions(),
subscriber,
|| self.client().info().chain.best_hash,
|| self.client().import_notification_stream()
.filter(|notification| future::ready(notification.is_new_best))
.map(|notification| Ok::<_, ()>(notification.header))
.compat(),
)
}
// send further subscriptions
let stream = stream()
.map(|res| Ok(res))
.map_err(|e| warn!("Block notification stream error: {:?}", e));
/// Unsubscribe from new head subscription.
fn unsubscribe_new_heads(
&self,
_metadata: Option<crate::metadata::Metadata>,
id: SubscriptionId,
) -> RpcResult<bool> {
Ok(self.subscriptions().cancel(id))
}
sink
.sink_map_err(|e| warn!("Error sending notifications: {:?}", e))
.send_all(
stream::iter_result(vec![Ok(header)])
.chain(stream)
)
// we ignore the resulting Stream (if the first stream is over we are unsubscribed)
.map(|_| ())
});
/// New head subscription
fn subscribe_finalized_heads(
&self,
_metadata: crate::metadata::Metadata,
subscriber: Subscriber<Block::Header>,
) {
subscribe_headers(
self.client(),
self.subscriptions(),
subscriber,
|| self.client().info().chain.finalized_hash,
|| self.client().finality_notification_stream()
.map(|notification| Ok::<_, ()>(notification.header))
.compat(),
)
}
/// Unsubscribe from new head subscription.
fn unsubscribe_finalized_heads(
&self,
_metadata: Option<crate::metadata::Metadata>,
id: SubscriptionId,
) -> RpcResult<bool> {
Ok(self.subscriptions().cancel(id))
}
}
fn client_error(err: client::error::Error) -> Error {
Error::Client(Box::new(err))
/// Create new Chain API that works on full node.
///
/// All data is served directly from the node's local database.
/// (The previous doc said "state API" — copy-paste error; this constructs
/// the chain RPC handler.)
pub fn new_full<B, E, Block: BlockT, RA>(
    client: Arc<Client<B, E, Block, RA>>,
    subscriptions: Subscriptions,
) -> Chain<B, E, Block, RA>
    where
        Block: BlockT<Hash=H256> + 'static,
        B: client::backend::Backend<Block, Blake2Hasher> + Send + Sync + 'static,
        E: client::CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static + Clone,
        RA: Send + Sync + 'static,
{
    Chain {
        backend: Box::new(self::chain_full::FullChain::new(client, subscriptions)),
    }
}
/// Create new Chain API that works on light node.
///
/// Data is read from the local database when available and fetched from a
/// remote node via `fetcher` otherwise. (The previous doc said "state API" —
/// copy-paste error; this constructs the chain RPC handler.)
pub fn new_light<B, E, Block: BlockT, RA, F: Fetcher<Block>>(
    client: Arc<Client<B, E, Block, RA>>,
    subscriptions: Subscriptions,
    remote_blockchain: Arc<dyn RemoteBlockchain<Block>>,
    fetcher: Arc<F>,
) -> Chain<B, E, Block, RA>
    where
        Block: BlockT<Hash=H256> + 'static,
        B: client::backend::Backend<Block, Blake2Hasher> + Send + Sync + 'static,
        E: client::CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static + Clone,
        RA: Send + Sync + 'static,
        F: Send + Sync + 'static,
{
    Chain {
        backend: Box::new(self::chain_light::LightChain::new(
            client,
            subscriptions,
            remote_blockchain,
            fetcher,
        )),
    }
}
/// Chain API with subscriptions support.
pub struct Chain<B, E, Block: BlockT, RA> {
    /// Backend this RPC façade delegates every call to — either a
    /// `FullChain` (created via `new_full`) or a `LightChain` (`new_light`).
    backend: Box<dyn ChainBackend<B, E, Block, RA>>,
}
impl<B, E, Block, RA> ChainApi<NumberFor<Block>, Block::Hash, Block::Header, SignedBlock<Block>> for Chain<B, E, Block, RA> where
@@ -115,58 +201,81 @@ impl<B, E, Block, RA> ChainApi<NumberFor<Block>, Block::Hash, Block::Header, Sig
{
type Metadata = crate::metadata::Metadata;
fn header(&self, hash: Option<Block::Hash>) -> Result<Option<Block::Header>> {
let hash = self.unwrap_or_best(hash)?;
Ok(self.client.header(&BlockId::Hash(hash)).map_err(client_error)?)
fn header(&self, hash: Option<Block::Hash>) -> FutureResult<Option<Block::Header>> {
self.backend.header(hash)
}
fn block(&self, hash: Option<Block::Hash>)
-> Result<Option<SignedBlock<Block>>>
fn block(&self, hash: Option<Block::Hash>) -> FutureResult<Option<SignedBlock<Block>>>
{
let hash = self.unwrap_or_best(hash)?;
Ok(self.client.block(&BlockId::Hash(hash)).map_err(client_error)?)
self.backend.block(hash)
}
fn block_hash(&self, number: Option<number::NumberOrHex<NumberFor<Block>>>) -> Result<Option<Block::Hash>> {
Ok(match number {
None => Some(self.client.info().chain.best_hash),
Some(num_or_hex) => self.client
.header(&BlockId::number(num_or_hex.to_number()?))
.map_err(client_error)?
.map(|h| h.hash()),
})
self.backend.block_hash(number)
}
fn finalized_head(&self) -> Result<Block::Hash> {
Ok(self.client.info().chain.finalized_hash)
self.backend.finalized_head()
}
fn subscribe_new_heads(&self, _metadata: Self::Metadata, subscriber: Subscriber<Block::Header>) {
self.subscribe_headers(
subscriber,
|| self.block_hash(None.into()),
|| self.client.import_notification_stream()
.filter(|notification| future::ready(notification.is_new_best))
.map(|notification| Ok::<_, ()>(notification.header))
.compat(),
)
fn subscribe_new_heads(&self, metadata: Self::Metadata, subscriber: Subscriber<Block::Header>) {
self.backend.subscribe_new_heads(metadata, subscriber)
}
fn unsubscribe_new_heads(&self, _metadata: Option<Self::Metadata>, id: SubscriptionId) -> RpcResult<bool> {
Ok(self.subscriptions.cancel(id))
fn unsubscribe_new_heads(&self, metadata: Option<Self::Metadata>, id: SubscriptionId) -> RpcResult<bool> {
self.backend.unsubscribe_new_heads(metadata, id)
}
fn subscribe_finalized_heads(&self, _meta: Self::Metadata, subscriber: Subscriber<Block::Header>) {
self.subscribe_headers(
subscriber,
|| Ok(Some(self.client.info().chain.finalized_hash)),
|| self.client.finality_notification_stream()
.map(|notification| Ok::<_, ()>(notification.header))
.compat(),
)
fn subscribe_finalized_heads(&self, metadata: Self::Metadata, subscriber: Subscriber<Block::Header>) {
self.backend.subscribe_finalized_heads(metadata, subscriber)
}
fn unsubscribe_finalized_heads(&self, _metadata: Option<Self::Metadata>, id: SubscriptionId) -> RpcResult<bool> {
Ok(self.subscriptions.cancel(id))
fn unsubscribe_finalized_heads(&self, metadata: Option<Self::Metadata>, id: SubscriptionId) -> RpcResult<bool> {
self.backend.unsubscribe_finalized_heads(metadata, id)
}
}
/// Subscribe to new headers.
///
/// Registers `subscriber` with `subscriptions`, immediately sends the header
/// of `best_block_hash()` as the first notification, then forwards every
/// header produced by `stream()` until the sink closes.
fn subscribe_headers<B, E, Block, RA, F, G, S, ERR>(
    client: &Arc<Client<B, E, Block, RA>>,
    subscriptions: &Subscriptions,
    subscriber: Subscriber<Block::Header>,
    best_block_hash: G,
    stream: F,
) where
    Block: BlockT<Hash=H256> + 'static,
    B: client::backend::Backend<Block, Blake2Hasher> + Send + Sync + 'static,
    E: client::CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static,
    F: FnOnce() -> S,
    G: FnOnce() -> Block::Hash,
    ERR: ::std::fmt::Debug,
    S: Stream<Item=Block::Header, Error=ERR> + Send + 'static,
{
    subscriptions.add(subscriber, |sink| {
        // send current head right at the start.
        let header = client.header(&BlockId::Hash(best_block_hash()))
            .map_err(client_err)
            .and_then(|header| {
                header.ok_or_else(|| "Best header missing.".to_owned().into())
            })
            .map_err(Into::into);

        // send further subscriptions
        let stream = stream()
            .map(|res| Ok(res))
            .map_err(|e| warn!("Block notification stream error: {:?}", e));

        sink
            .sink_map_err(|e| warn!("Error sending notifications: {:?}", e))
            .send_all(
                // Prepend the current head (possibly an error item) to the
                // live notification stream so subscribers always get an
                // initial value.
                stream::iter_result(vec![Ok(header)])
                    .chain(stream)
            )
            // we ignore the resulting Stream (if the first stream is over we are unsubscribed)
            .map(|_| ())
    });
}
/// Wrap a client error into the RPC-level `Error::Client` variant, boxing the
/// original error so its details are preserved.
fn client_err(err: client::error::Error) -> Error {
    Error::Client(Box::new(err))
}
+48 -60
View File
@@ -27,13 +27,11 @@ fn should_return_header() {
let core = ::tokio::runtime::Runtime::new().unwrap();
let remote = core.executor();
let client = Chain {
client: Arc::new(test_client::new()),
subscriptions: Subscriptions::new(Arc::new(remote)),
};
let client = Arc::new(test_client::new());
let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote)));
assert_matches!(
client.header(Some(client.client.genesis_hash()).into()),
api.header(Some(client.genesis_hash()).into()).wait(),
Ok(Some(ref x)) if x == &Header {
parent_hash: H256::from_low_u64_be(0),
number: 0,
@@ -44,7 +42,7 @@ fn should_return_header() {
);
assert_matches!(
client.header(None.into()),
api.header(None.into()).wait(),
Ok(Some(ref x)) if x == &Header {
parent_hash: H256::from_low_u64_be(0),
number: 0,
@@ -55,7 +53,7 @@ fn should_return_header() {
);
assert_matches!(
client.header(Some(H256::from_low_u64_be(5)).into()),
api.header(Some(H256::from_low_u64_be(5)).into()).wait(),
Ok(None)
);
}
@@ -65,26 +63,24 @@ fn should_return_a_block() {
let core = ::tokio::runtime::Runtime::new().unwrap();
let remote = core.executor();
let api = Chain {
client: Arc::new(test_client::new()),
subscriptions: Subscriptions::new(Arc::new(remote)),
};
let client = Arc::new(test_client::new());
let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote)));
let block = api.client.new_block(Default::default()).unwrap().bake().unwrap();
let block = client.new_block(Default::default()).unwrap().bake().unwrap();
let block_hash = block.hash();
api.client.import(BlockOrigin::Own, block).unwrap();
client.import(BlockOrigin::Own, block).unwrap();
// Genesis block is not justified
assert_matches!(
api.block(Some(api.client.genesis_hash()).into()),
api.block(Some(client.genesis_hash()).into()).wait(),
Ok(Some(SignedBlock { justification: None, .. }))
);
assert_matches!(
api.block(Some(block_hash).into()),
api.block(Some(block_hash).into()).wait(),
Ok(Some(ref x)) if x.block == Block {
header: Header {
parent_hash: api.client.genesis_hash(),
parent_hash: client.genesis_hash(),
number: 1,
state_root: x.block.header.state_root.clone(),
extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314".parse().unwrap(),
@@ -95,10 +91,10 @@ fn should_return_a_block() {
);
assert_matches!(
api.block(None.into()),
api.block(None.into()).wait(),
Ok(Some(ref x)) if x.block == Block {
header: Header {
parent_hash: api.client.genesis_hash(),
parent_hash: client.genesis_hash(),
number: 1,
state_root: x.block.header.state_root.clone(),
extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314".parse().unwrap(),
@@ -109,7 +105,7 @@ fn should_return_a_block() {
);
assert_matches!(
api.block(Some(H256::from_low_u64_be(5)).into()),
api.block(Some(H256::from_low_u64_be(5)).into()).wait(),
Ok(None)
);
}
@@ -119,40 +115,38 @@ fn should_return_block_hash() {
let core = ::tokio::runtime::Runtime::new().unwrap();
let remote = core.executor();
let client = Chain {
client: Arc::new(test_client::new()),
subscriptions: Subscriptions::new(Arc::new(remote)),
};
let client = Arc::new(test_client::new());
let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote)));
assert_matches!(
client.block_hash(None.into()),
Ok(Some(ref x)) if x == &client.client.genesis_hash()
api.block_hash(None.into()),
Ok(Some(ref x)) if x == &client.genesis_hash()
);
assert_matches!(
client.block_hash(Some(0u64.into()).into()),
Ok(Some(ref x)) if x == &client.client.genesis_hash()
api.block_hash(Some(0u64.into()).into()),
Ok(Some(ref x)) if x == &client.genesis_hash()
);
assert_matches!(
client.block_hash(Some(1u64.into()).into()),
api.block_hash(Some(1u64.into()).into()),
Ok(None)
);
let block = client.client.new_block(Default::default()).unwrap().bake().unwrap();
client.client.import(BlockOrigin::Own, block.clone()).unwrap();
let block = client.new_block(Default::default()).unwrap().bake().unwrap();
client.import(BlockOrigin::Own, block.clone()).unwrap();
assert_matches!(
client.block_hash(Some(0u64.into()).into()),
Ok(Some(ref x)) if x == &client.client.genesis_hash()
api.block_hash(Some(0u64.into()).into()),
Ok(Some(ref x)) if x == &client.genesis_hash()
);
assert_matches!(
client.block_hash(Some(1u64.into()).into()),
api.block_hash(Some(1u64.into()).into()),
Ok(Some(ref x)) if x == &block.hash()
);
assert_matches!(
client.block_hash(Some(::primitives::U256::from(1u64).into()).into()),
api.block_hash(Some(::primitives::U256::from(1u64).into()).into()),
Ok(Some(ref x)) if x == &block.hash()
);
}
@@ -163,30 +157,28 @@ fn should_return_finalized_hash() {
let core = ::tokio::runtime::Runtime::new().unwrap();
let remote = core.executor();
let client = Chain {
client: Arc::new(test_client::new()),
subscriptions: Subscriptions::new(Arc::new(remote)),
};
let client = Arc::new(test_client::new());
let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote)));
assert_matches!(
client.finalized_head(),
Ok(ref x) if x == &client.client.genesis_hash()
api.finalized_head(),
Ok(ref x) if x == &client.genesis_hash()
);
// import new block
let builder = client.client.new_block(Default::default()).unwrap();
client.client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
let builder = client.new_block(Default::default()).unwrap();
client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
// no finalization yet
assert_matches!(
client.finalized_head(),
Ok(ref x) if x == &client.client.genesis_hash()
api.finalized_head(),
Ok(ref x) if x == &client.genesis_hash()
);
// finalize
client.client.finalize_block(BlockId::number(1), None).unwrap();
client.finalize_block(BlockId::number(1), None).unwrap();
assert_matches!(
client.finalized_head(),
Ok(ref x) if x == &client.client.block_hash(1).unwrap().unwrap()
api.finalized_head(),
Ok(ref x) if x == &client.block_hash(1).unwrap().unwrap()
);
}
@@ -197,18 +189,16 @@ fn should_notify_about_latest_block() {
let (subscriber, id, transport) = Subscriber::new_test("test");
{
let api = Chain {
client: Arc::new(test_client::new()),
subscriptions: Subscriptions::new(Arc::new(remote)),
};
let client = Arc::new(test_client::new());
let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote)));
api.subscribe_new_heads(Default::default(), subscriber);
// assert id assigned
assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1))));
let builder = api.client.new_block(Default::default()).unwrap();
api.client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
let builder = client.new_block(Default::default()).unwrap();
client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
}
// assert initial head sent.
@@ -228,19 +218,17 @@ fn should_notify_about_finalized_block() {
let (subscriber, id, transport) = Subscriber::new_test("test");
{
let api = Chain {
client: Arc::new(test_client::new()),
subscriptions: Subscriptions::new(Arc::new(remote)),
};
let client = Arc::new(test_client::new());
let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote)));
api.subscribe_finalized_heads(Default::default(), subscriber);
// assert id assigned
assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1))));
let builder = api.client.new_block(Default::default()).unwrap();
api.client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
api.client.finalize_block(BlockId::number(1), None).unwrap();
let builder = client.new_block(Default::default()).unwrap();
client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
client.finalize_block(BlockId::number(1), None).unwrap();
}
// assert initial head sent.
+340 -401
View File
@@ -16,426 +16,169 @@
//! Substrate state API.
mod state_full;
mod state_light;
#[cfg(test)]
mod tests;
use std::{
collections::{BTreeMap, HashMap},
ops::Range,
sync::Arc,
};
use std::sync::Arc;
use futures03::{future, StreamExt as _, TryStreamExt as _};
use client::{self, Client, CallExecutor, BlockchainEvents, runtime_api::Metadata};
use rpc::Result as RpcResult;
use rpc::futures::{stream, Future, Sink, Stream};
use api::Subscriptions;
use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId};
use log::{warn, trace};
use primitives::hexdisplay::HexDisplay;
use primitives::storage::{self, StorageKey, StorageData, StorageChangeSet};
use primitives::{H256, Blake2Hasher, Bytes};
use sr_primitives::generic::BlockId;
use sr_primitives::traits::{
Block as BlockT, Header, ProvideRuntimeApi, NumberFor,
SaturatedConversion
use log::warn;
use rpc::{
Result as RpcResult,
futures::{stream, Future, Sink, Stream},
};
use api::Subscriptions;
use client::{
BlockchainEvents, Client, CallExecutor,
runtime_api::Metadata,
light::{blockchain::RemoteBlockchain, fetcher::Fetcher},
};
use primitives::{
Blake2Hasher, Bytes, H256,
storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet},
};
use runtime_version::RuntimeVersion;
use self::error::{Error, Result};
use state_machine::{self, ExecutionStrategy};
use sr_primitives::{
generic::BlockId,
traits::{Block as BlockT, ProvideRuntimeApi},
};
use self::error::{Error, FutureResult};
pub use api::state::*;
/// State API with subscriptions support.
pub struct State<B, E, Block: BlockT, RA> {
/// Substrate client.
client: Arc<Client<B, E, Block, RA>>,
/// Current subscriptions.
subscriptions: Subscriptions,
}
/// Ranges to query in state_queryStorage.
struct QueryStorageRange<Block: BlockT> {
    /// Hashes of all the blocks in the range, ordered from `first_number`
    /// upwards.
    pub hashes: Vec<Block::Hash>,
    /// Number of the first block in the range.
    pub first_number: NumberFor<Block>,
    /// Blocks subrange ([begin; end) indices within `hashes`) where we should read keys at
    /// each state to get changes.
    pub unfiltered_range: Range<usize>,
    /// Blocks subrange ([begin; end) indices within `hashes`) where we could pre-filter
    /// blocks-with-changes by using changes tries.
    pub filtered_range: Option<Range<usize>>,
}
/// Wrap a client error into the RPC-level `Error::Client` variant, boxing the
/// original error so its details are preserved.
fn client_err(err: client::error::Error) -> Error {
    Error::Client(Box::new(err))
}
impl<B, E, Block: BlockT, RA> State<B, E, Block, RA> where
Block: BlockT<Hash=H256>,
B: client::backend::Backend<Block, Blake2Hasher>,
E: CallExecutor<Block, Blake2Hasher>,
/// State backend API.
pub trait StateBackend<B, E, Block: BlockT, RA>: Send + Sync + 'static
where
Block: BlockT<Hash=H256> + 'static,
B: client::backend::Backend<Block, Blake2Hasher> + Send + Sync + 'static,
E: client::CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static,
RA: Send + Sync + 'static,
{
/// Create new State API RPC handler.
pub fn new(client: Arc<Client<B, E, Block, RA>>, subscriptions: Subscriptions) -> Self {
Self {
client,
subscriptions,
}
}
/// Get client reference.
fn client(&self) -> &Arc<Client<B, E, Block, RA>>;
/// Splits the `query_storage` block range into 'filtered' and 'unfiltered' subranges.
/// Blocks that contain changes within filtered subrange could be filtered using changes tries.
/// Blocks that contain changes within unfiltered subrange must be filtered manually.
fn split_query_storage_range(
/// Get subscriptions reference.
fn subscriptions(&self) -> &Subscriptions;
/// Call runtime method at given block.
fn call(
&self,
from: Block::Hash,
to: Option<Block::Hash>
) -> Result<QueryStorageRange<Block>> {
let to = self.unwrap_or_best(to)?;
let from_hdr = self.client.header(&BlockId::hash(from)).map_err(client_err)?;
let to_hdr = self.client.header(&BlockId::hash(to)).map_err(client_err)?;
match (from_hdr, to_hdr) {
(Some(ref from), Some(ref to)) if from.number() <= to.number() => {
// check if we can get from `to` to `from` by going through parent_hashes.
let from_number = *from.number();
let blocks = {
let mut blocks = vec![to.hash()];
let mut last = to.clone();
while *last.number() > from_number {
let hdr = self.client
.header(&BlockId::hash(*last.parent_hash()))
.map_err(client_err)?;
if let Some(hdr) = hdr {
blocks.push(hdr.hash());
last = hdr;
} else {
return Err(invalid_block_range(
Some(from),
Some(to),
format!("Parent of {} ({}) not found", last.number(), last.hash()),
))
}
}
if last.hash() != from.hash() {
return Err(invalid_block_range(
Some(from),
Some(to),
format!("Expected to reach `from`, got {} ({})", last.number(), last.hash()),
))
}
blocks.reverse();
blocks
};
// check if we can filter blocks-with-changes from some (sub)range using changes tries
let changes_trie_range = self.client
.max_key_changes_range(from_number, BlockId::Hash(to.hash()))
.map_err(client_err)?;
let filtered_range_begin = changes_trie_range.map(|(begin, _)| (begin - from_number).saturated_into::<usize>());
let (unfiltered_range, filtered_range) = split_range(blocks.len(), filtered_range_begin);
Ok(QueryStorageRange {
hashes: blocks,
first_number: from_number,
unfiltered_range,
filtered_range,
})
},
(from, to) => Err(
invalid_block_range(from.as_ref(), to.as_ref(), "Invalid range or unknown block".into())
),
}
}
block: Option<Block::Hash>,
method: String,
call_data: Bytes,
) -> FutureResult<Bytes>;
/// Iterates through range.unfiltered_range and check each block for changes of keys' values.
fn query_storage_unfiltered(
/// Returns the keys with prefix, leave empty to get all the keys.
fn storage_keys(
&self,
range: &QueryStorageRange<Block>,
keys: &[StorageKey],
last_values: &mut HashMap<StorageKey, Option<StorageData>>,
changes: &mut Vec<StorageChangeSet<Block::Hash>>,
) -> Result<()> {
for block in range.unfiltered_range.start..range.unfiltered_range.end {
let block_hash = range.hashes[block].clone();
let mut block_changes = StorageChangeSet { block: block_hash.clone(), changes: Vec::new() };
let id = BlockId::hash(block_hash);
for key in keys {
let (has_changed, data) = {
let curr_data = self.client.storage(&id, key)
.map_err(client_err)?;
match last_values.get(key) {
Some(prev_data) => (curr_data != *prev_data, curr_data),
None => (true, curr_data),
}
};
if has_changed {
block_changes.changes.push((key.clone(), data.clone()));
}
last_values.insert(key.clone(), data);
}
if !block_changes.changes.is_empty() {
changes.push(block_changes);
}
}
Ok(())
}
block: Option<Block::Hash>,
prefix: StorageKey,
) -> FutureResult<Vec<StorageKey>>;
/// Iterates through all blocks that are changing keys within range.filtered_range and collects these changes.
fn query_storage_filtered(
/// Returns a storage entry at a specific block's state.
fn storage(
&self,
range: &QueryStorageRange<Block>,
keys: &[StorageKey],
last_values: &HashMap<StorageKey, Option<StorageData>>,
changes: &mut Vec<StorageChangeSet<Block::Hash>>,
) -> Result<()> {
let (begin, end) = match range.filtered_range {
Some(ref filtered_range) => (
range.first_number + filtered_range.start.saturated_into(),
BlockId::Hash(range.hashes[filtered_range.end - 1].clone())
),
None => return Ok(()),
};
let mut changes_map: BTreeMap<NumberFor<Block>, StorageChangeSet<Block::Hash>> = BTreeMap::new();
for key in keys {
let mut last_block = None;
let mut last_value = last_values.get(key).cloned().unwrap_or_default();
for (block, _) in self.client.key_changes(begin, end, key).map_err(client_err)?.into_iter().rev() {
if last_block == Some(block) {
continue;
}
let block_hash = range.hashes[(block - range.first_number).saturated_into::<usize>()].clone();
let id = BlockId::Hash(block_hash);
let value_at_block = self.client.storage(&id, key).map_err(client_err)?;
if last_value == value_at_block {
continue;
}
changes_map.entry(block)
.or_insert_with(|| StorageChangeSet { block: block_hash, changes: Vec::new() })
.changes.push((key.clone(), value_at_block.clone()));
last_block = Some(block);
last_value = value_at_block;
}
}
if let Some(additional_capacity) = changes_map.len().checked_sub(changes.len()) {
changes.reserve(additional_capacity);
}
changes.extend(changes_map.into_iter().map(|(_, cs)| cs));
Ok(())
}
}
impl<B, E, Block, RA> State<B, E, Block, RA> where
	Block: BlockT<Hash=H256>,
	B: client::backend::Backend<Block, Blake2Hasher>,
	E: CallExecutor<Block, Blake2Hasher>,
{
	/// Returns the given block hash, or the client's current best block hash when
	/// `None` is passed.
	fn unwrap_or_best(&self, hash: Option<Block::Hash>) -> Result<Block::Hash> {
		crate::helpers::unwrap_or_else(|| Ok(self.client.info().chain.best_hash), hash)
	}
}
impl<B, E, Block, RA> StateApi<Block::Hash> for State<B, E, Block, RA> where
Block: BlockT<Hash=H256> + 'static,
B: client::backend::Backend<Block, Blake2Hasher> + Send + Sync + 'static,
E: CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static + Clone,
RA: Send + Sync + 'static,
Client<B, E, Block, RA>: ProvideRuntimeApi,
<Client<B, E, Block, RA> as ProvideRuntimeApi>::Api: Metadata<Block>
{
type Metadata = crate::metadata::Metadata;
fn call(&self, method: String, data: Bytes, block: Option<Block::Hash>) -> Result<Bytes> {
let block = self.unwrap_or_best(block)?;
trace!(target: "rpc", "Calling runtime at {:?} for method {} ({})", block, method, HexDisplay::from(&data.0));
let return_data = self.client
.executor()
.call(
&BlockId::Hash(block),
&method, &data.0, ExecutionStrategy::NativeElseWasm, state_machine::NeverOffchainExt::new(),
)
.map_err(client_err)?;
Ok(Bytes(return_data))
}
fn storage_keys(&self, key_prefix: StorageKey, block: Option<Block::Hash>) -> Result<Vec<StorageKey>> {
let block = self.unwrap_or_best(block)?;
trace!(target: "rpc", "Querying storage keys at {:?}", block);
Ok(self.client.storage_keys(&BlockId::Hash(block), &key_prefix).map_err(client_err)?)
}
fn storage(&self, key: StorageKey, block: Option<Block::Hash>) -> Result<Option<StorageData>> {
let block = self.unwrap_or_best(block)?;
trace!(target: "rpc", "Querying storage at {:?} for key {}", block, HexDisplay::from(&key.0));
Ok(self.client.storage(&BlockId::Hash(block), &key).map_err(client_err)?)
}
fn storage_hash(&self, key: StorageKey, block: Option<Block::Hash>) -> Result<Option<Block::Hash>> {
let block = self.unwrap_or_best(block)?;
trace!(target: "rpc", "Querying storage hash at {:?} for key {}", block, HexDisplay::from(&key.0));
Ok(self.client.storage_hash(&BlockId::Hash(block), &key).map_err(client_err)?)
}
fn storage_size(&self, key: StorageKey, block: Option<Block::Hash>) -> Result<Option<u64>> {
Ok(self.storage(key, block)?.map(|x| x.0.len() as u64))
}
fn child_storage(
&self,
child_storage_key: StorageKey,
block: Option<Block::Hash>,
key: StorageKey,
block: Option<Block::Hash>
) -> Result<Option<StorageData>> {
let block = self.unwrap_or_best(block)?;
trace!(target: "rpc", "Querying child storage at {:?} for key {}", block, HexDisplay::from(&key.0));
Ok(self.client
.child_storage(&BlockId::Hash(block), &child_storage_key, &key)
.map_err(client_err)?
)
) -> FutureResult<Option<StorageData>>;
/// Returns the hash of a storage entry at a block's state.
fn storage_hash(
&self,
block: Option<Block::Hash>,
key: StorageKey,
) -> FutureResult<Option<Block::Hash>>;
/// Returns the size of a storage entry at a block's state.
fn storage_size(
&self,
block: Option<Block::Hash>,
key: StorageKey,
) -> FutureResult<Option<u64>> {
Box::new(self.storage(block, key)
.map(|x| x.map(|x| x.0.len() as u64)))
}
/// Returns the keys with prefix from a child storage, leave empty to get all the keys
fn child_storage_keys(
&self,
block: Option<Block::Hash>,
child_storage_key: StorageKey,
key_prefix: StorageKey,
block: Option<Block::Hash>
) -> Result<Vec<StorageKey>> {
let block = self.unwrap_or_best(block)?;
trace!(target: "rpc", "Querying child storage keys at {:?}", block);
Ok(self.client
.child_storage_keys(&BlockId::Hash(block), &child_storage_key, &key_prefix)
.map_err(client_err)?
)
}
prefix: StorageKey,
) -> FutureResult<Vec<StorageKey>>;
/// Returns a child storage entry at a specific block's state.
fn child_storage(
&self,
block: Option<Block::Hash>,
child_storage_key: StorageKey,
key: StorageKey,
) -> FutureResult<Option<StorageData>>;
/// Returns the hash of a child storage entry at a block's state.
fn child_storage_hash(
&self,
block: Option<Block::Hash>,
child_storage_key: StorageKey,
key: StorageKey,
block: Option<Block::Hash>
) -> Result<Option<Block::Hash>> {
let block = self.unwrap_or_best(block)?;
trace!(
target: "rpc", "Querying child storage hash at {:?} for key {}",
block,
HexDisplay::from(&key.0),
);
Ok(self.client
.child_storage_hash(&BlockId::Hash(block), &child_storage_key, &key)
.map_err(client_err)?
)
}
) -> FutureResult<Option<Block::Hash>>;
/// Returns the size of a child storage entry at a block's state.
fn child_storage_size(
&self,
block: Option<Block::Hash>,
child_storage_key: StorageKey,
key: StorageKey,
block: Option<Block::Hash>
) -> Result<Option<u64>> {
Ok(self.child_storage(child_storage_key, key, block)?.map(|x| x.0.len() as u64))
) -> FutureResult<Option<u64>> {
Box::new(self.child_storage(block, child_storage_key, key)
.map(|x| x.map(|x| x.0.len() as u64)))
}
fn metadata(&self, block: Option<Block::Hash>) -> Result<Bytes> {
let block = self.unwrap_or_best(block)?;
self.client
.runtime_api()
.metadata(&BlockId::Hash(block))
.map(Into::into)
.map_err(client_err)
}
/// Returns the runtime metadata as an opaque blob.
fn metadata(&self, block: Option<Block::Hash>) -> FutureResult<Bytes>;
/// Get the runtime version.
fn runtime_version(&self, block: Option<Block::Hash>) -> FutureResult<RuntimeVersion>;
/// Query historical storage entries (by key) starting from a block given as the second parameter.
///
/// NOTE This first returned result contains the initial state of storage for all keys.
/// Subsequent values in the vector represent changes to the previous state (diffs).
fn query_storage(
&self,
keys: Vec<StorageKey>,
from: Block::Hash,
to: Option<Block::Hash>
) -> Result<Vec<StorageChangeSet<Block::Hash>>> {
let range = self.split_query_storage_range(from, to)?;
let mut changes = Vec::new();
let mut last_values = HashMap::new();
self.query_storage_unfiltered(&range, &keys, &mut last_values, &mut changes)?;
self.query_storage_filtered(&range, &keys, &last_values, &mut changes)?;
Ok(changes)
}
to: Option<Block::Hash>,
keys: Vec<StorageKey>,
) -> FutureResult<Vec<StorageChangeSet<Block::Hash>>>;
fn subscribe_storage(
/// New runtime version subscription
fn subscribe_runtime_version(
&self,
_meta: Self::Metadata,
subscriber: Subscriber<StorageChangeSet<Block::Hash>>,
keys: Option<Vec<StorageKey>>
_meta: crate::metadata::Metadata,
subscriber: Subscriber<RuntimeVersion>,
) {
let keys = Into::<Option<Vec<_>>>::into(keys);
let stream = match self.client.storage_changes_notification_stream(
keys.as_ref().map(|x| &**x),
None
) {
Ok(stream) => stream,
Err(err) => {
let _ = subscriber.reject(client_err(err).into());
return;
},
};
// initial values
let initial = stream::iter_result(keys
.map(|keys| {
let block = self.client.info().chain.best_hash;
let changes = keys
.into_iter()
.map(|key| self.storage(key.clone(), Some(block.clone()).into())
.map(|val| (key.clone(), val))
.unwrap_or_else(|_| (key, None))
)
.collect();
vec![Ok(Ok(StorageChangeSet { block, changes }))]
}).unwrap_or_default());
self.subscriptions.add(subscriber, |sink| {
let stream = stream
.map(|(block, changes)| Ok::<_, ()>(Ok(StorageChangeSet {
block,
changes: changes.iter()
.filter_map(|(o_sk, k, v)| if o_sk.is_none() {
Some((k.clone(),v.cloned()))
} else { None }).collect(),
})))
.compat();
sink
.sink_map_err(|e| warn!("Error sending notifications: {:?}", e))
.send_all(initial.chain(stream))
// we ignore the resulting Stream (if the first stream is over we are unsubscribed)
.map(|_| ())
})
}
fn unsubscribe_storage(&self, _meta: Option<Self::Metadata>, id: SubscriptionId) -> RpcResult<bool> {
Ok(self.subscriptions.cancel(id))
}
fn runtime_version(&self, at: Option<Block::Hash>) -> Result<RuntimeVersion> {
let at = self.unwrap_or_best(at)?;
Ok(self.client.runtime_version_at(&BlockId::Hash(at)).map_err(client_err)?)
}
fn subscribe_runtime_version(&self, _meta: Self::Metadata, subscriber: Subscriber<RuntimeVersion>) {
let stream = match self.client.storage_changes_notification_stream(
Some(&[StorageKey(storage::well_known_keys::CODE.to_vec())]),
let stream = match self.client().storage_changes_notification_stream(
Some(&[StorageKey(well_known_keys::CODE.to_vec())]),
None,
) {
Ok(stream) => stream,
Err(err) => {
let _ = subscriber.reject(client_err(err).into());
let _ = subscriber.reject(Error::from(client_err(err)).into());
return;
}
};
self.subscriptions.add(subscriber, |sink| {
self.subscriptions().add(subscriber, |sink| {
let version = self.runtime_version(None.into())
.map_err(Into::into);
.map_err(Into::into)
.wait();
let client = self.client.clone();
let client = self.client().clone();
let mut previous_version = version.clone();
let stream = stream
@@ -465,41 +208,237 @@ impl<B, E, Block, RA> StateApi<Block::Hash> for State<B, E, Block, RA> where
});
}
fn unsubscribe_runtime_version(&self, _meta: Option<Self::Metadata>, id: SubscriptionId) -> RpcResult<bool> {
Ok(self.subscriptions.cancel(id))
/// Unsubscribe from runtime version subscription
fn unsubscribe_runtime_version(
&self,
_meta: Option<crate::metadata::Metadata>,
id: SubscriptionId,
) -> RpcResult<bool> {
Ok(self.subscriptions().cancel(id))
}
/// New storage subscription
fn subscribe_storage(
&self,
_meta: crate::metadata::Metadata,
subscriber: Subscriber<StorageChangeSet<Block::Hash>>,
keys: Option<Vec<StorageKey>>
) {
let keys = Into::<Option<Vec<_>>>::into(keys);
let stream = match self.client().storage_changes_notification_stream(
keys.as_ref().map(|x| &**x),
None
) {
Ok(stream) => stream,
Err(err) => {
let _ = subscriber.reject(client_err(err).into());
return;
},
};
// initial values
let initial = stream::iter_result(keys
.map(|keys| {
let block = self.client().info().chain.best_hash;
let changes = keys
.into_iter()
.map(|key| self.storage(Some(block.clone()).into(), key.clone())
.map(|val| (key.clone(), val))
.wait()
.unwrap_or_else(|_| (key, None))
)
.collect();
vec![Ok(Ok(StorageChangeSet { block, changes }))]
}).unwrap_or_default());
self.subscriptions().add(subscriber, |sink| {
let stream = stream
.map(|(block, changes)| Ok::<_, ()>(Ok(StorageChangeSet {
block,
changes: changes.iter()
.filter_map(|(o_sk, k, v)| if o_sk.is_none() {
Some((k.clone(),v.cloned()))
} else { None }).collect(),
})))
.compat();
sink
.sink_map_err(|e| warn!("Error sending notifications: {:?}", e))
.send_all(initial.chain(stream))
// we ignore the resulting Stream (if the first stream is over we are unsubscribed)
.map(|_| ())
})
}
/// Unsubscribe from storage subscription
fn unsubscribe_storage(
&self,
_meta: Option<crate::metadata::Metadata>,
id: SubscriptionId,
) -> RpcResult<bool> {
Ok(self.subscriptions().cancel(id))
}
}
/// Splits passed range into two subranges where:
/// - first range has at least one element in it;
/// - second range (optionally) starts at given `middle` element.
pub(crate) fn split_range(size: usize, middle: Option<usize>) -> (Range<usize>, Option<Range<usize>>) {
	// Decide where the second ('filtered') subrange begins, if anywhere.
	let second_start = match middle {
		// some of required changes tries are pruned => use available tries
		Some(m) if m != 0 => Some(m),
		// all required changes tries are available, but we still want values at first block
		// => do 'unfiltered' read for the first block and 'filtered' for the rest
		Some(_) if size > 1 => Some(1),
		// single-element range, or changes tries not available
		// => do 'unfiltered' read for the whole range
		_ => None,
	};
	match second_start {
		Some(begin) => (0..begin, Some(begin..size)),
		None => (0..size, None),
	}
}
fn invalid_block_range<H: Header>(from: Option<&H>, to: Option<&H>, reason: String) -> error::Error {
let to_string = |x: Option<&H>| match x {
None => "unknown hash".into(),
Some(h) => format!("{} ({})", h.number(), h.hash()),
};
error::Error::InvalidBlockRange {
from: to_string(from),
to: to_string(to),
details: reason,
/// Create new state API that works on full node.
///
/// All queries are answered from the node's local database via a
/// [`FullState`] backend.
pub fn new_full<B, E, Block: BlockT, RA>(
	client: Arc<Client<B, E, Block, RA>>,
	subscriptions: Subscriptions,
) -> State<B, E, Block, RA>
	where
		Block: BlockT<Hash=H256> + 'static,
		B: client::backend::Backend<Block, Blake2Hasher> + Send + Sync + 'static,
		E: CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static + Clone,
		RA: Send + Sync + 'static,
		Client<B, E, Block, RA>: ProvideRuntimeApi,
		<Client<B, E, Block, RA> as ProvideRuntimeApi>::Api: Metadata<Block>,
{
	let backend = self::state_full::FullState::new(client, subscriptions);
	State { backend: Box::new(backend) }
}
/// Create new state API that works on light node.
///
/// Queries are served by a [`LightState`] backend that fetches data
/// on demand from remote full nodes via `fetcher`.
pub fn new_light<B, E, Block: BlockT, RA, F: Fetcher<Block>>(
	client: Arc<Client<B, E, Block, RA>>,
	subscriptions: Subscriptions,
	remote_blockchain: Arc<dyn RemoteBlockchain<Block>>,
	fetcher: Arc<F>,
) -> State<B, E, Block, RA>
	where
		Block: BlockT<Hash=H256> + 'static,
		B: client::backend::Backend<Block, Blake2Hasher> + Send + Sync + 'static,
		E: CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static + Clone,
		RA: Send + Sync + 'static,
		F: Send + Sync + 'static,
{
	let light_backend = self::state_light::LightState::new(
		client,
		subscriptions,
		remote_blockchain,
		fetcher,
	);
	State { backend: Box::new(light_backend) }
}
/// State API with subscriptions support.
pub struct State<B, E, Block, RA> {
	/// Backend implementation (full or light node) that actually answers the queries.
	backend: Box<dyn StateBackend<B, E, Block, RA>>,
}
impl<B, E, Block, RA> StateApi<Block::Hash> for State<B, E, Block, RA>
	where
		Block: BlockT<Hash=H256> + 'static,
		B: client::backend::Backend<Block, Blake2Hasher> + Send + Sync + 'static,
		E: CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static + Clone,
		RA: Send + Sync + 'static,
{
	type Metadata = crate::metadata::Metadata;

	// NOTE: the RPC trait takes the optional `block` argument last, while the
	// backend trait takes it first; each method below only reorders arguments
	// and delegates to the boxed backend.
	fn call(&self, method: String, data: Bytes, block: Option<Block::Hash>) -> FutureResult<Bytes> {
		self.backend.call(block, method, data)
	}

	fn storage_keys(
		&self,
		key_prefix: StorageKey,
		block: Option<Block::Hash>,
	) -> FutureResult<Vec<StorageKey>> {
		self.backend.storage_keys(block, key_prefix)
	}

	fn storage(&self, key: StorageKey, block: Option<Block::Hash>) -> FutureResult<Option<StorageData>> {
		self.backend.storage(block, key)
	}

	fn storage_hash(&self, key: StorageKey, block: Option<Block::Hash>) -> FutureResult<Option<Block::Hash>> {
		self.backend.storage_hash(block, key)
	}

	fn storage_size(&self, key: StorageKey, block: Option<Block::Hash>) -> FutureResult<Option<u64>> {
		self.backend.storage_size(block, key)
	}

	fn child_storage(
		&self,
		child_storage_key: StorageKey,
		key: StorageKey,
		block: Option<Block::Hash>
	) -> FutureResult<Option<StorageData>> {
		self.backend.child_storage(block, child_storage_key, key)
	}

	fn child_storage_keys(
		&self,
		child_storage_key: StorageKey,
		key_prefix: StorageKey,
		block: Option<Block::Hash>
	) -> FutureResult<Vec<StorageKey>> {
		self.backend.child_storage_keys(block, child_storage_key, key_prefix)
	}

	fn child_storage_hash(
		&self,
		child_storage_key: StorageKey,
		key: StorageKey,
		block: Option<Block::Hash>
	) -> FutureResult<Option<Block::Hash>> {
		self.backend.child_storage_hash(block, child_storage_key, key)
	}

	fn child_storage_size(
		&self,
		child_storage_key: StorageKey,
		key: StorageKey,
		block: Option<Block::Hash>
	) -> FutureResult<Option<u64>> {
		self.backend.child_storage_size(block, child_storage_key, key)
	}

	fn metadata(&self, block: Option<Block::Hash>) -> FutureResult<Bytes> {
		self.backend.metadata(block)
	}

	fn query_storage(
		&self,
		keys: Vec<StorageKey>,
		from: Block::Hash,
		to: Option<Block::Hash>
	) -> FutureResult<Vec<StorageChangeSet<Block::Hash>>> {
		self.backend.query_storage(from, to, keys)
	}

	fn subscribe_storage(
		&self,
		meta: Self::Metadata,
		subscriber: Subscriber<StorageChangeSet<Block::Hash>>,
		keys: Option<Vec<StorageKey>>
	) {
		self.backend.subscribe_storage(meta, subscriber, keys);
	}

	fn unsubscribe_storage(&self, meta: Option<Self::Metadata>, id: SubscriptionId) -> RpcResult<bool> {
		self.backend.unsubscribe_storage(meta, id)
	}

	fn runtime_version(&self, at: Option<Block::Hash>) -> FutureResult<RuntimeVersion> {
		self.backend.runtime_version(at)
	}

	fn subscribe_runtime_version(&self, meta: Self::Metadata, subscriber: Subscriber<RuntimeVersion>) {
		self.backend.subscribe_runtime_version(meta, subscriber);
	}

	fn unsubscribe_runtime_version(
		&self,
		meta: Option<Self::Metadata>,
		id: SubscriptionId,
	) -> RpcResult<bool> {
		self.backend.unsubscribe_runtime_version(meta, id)
	}
}
fn client_err(err: client::error::Error) -> Error {
Error::Client(Box::new(err))
}
+389
View File
@@ -0,0 +1,389 @@
// Copyright 2019 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! State API backend for full nodes.
use std::collections::{BTreeMap, HashMap};
use std::sync::Arc;
use std::ops::Range;
use rpc::futures::future::result;
use api::Subscriptions;
use client::{
Client, CallExecutor, runtime_api::Metadata,
backend::Backend, error::Result as ClientResult,
};
use primitives::{
H256, Blake2Hasher, Bytes,
storage::{StorageKey, StorageData, StorageChangeSet},
};
use runtime_version::RuntimeVersion;
use state_machine::{NeverOffchainExt, ExecutionStrategy};
use sr_primitives::{
generic::BlockId,
traits::{Block as BlockT, Header, NumberFor, ProvideRuntimeApi, SaturatedConversion},
};
use super::{StateBackend, error::{FutureResult, Error, Result}, client_err};
/// Ranges to query in state_queryStorage.
///
/// Produced by `FullState::split_query_storage_range` and consumed by the
/// `query_storage_unfiltered` / `query_storage_filtered` passes.
struct QueryStorageRange<Block: BlockT> {
	/// Hashes of all the blocks in the range.
	pub hashes: Vec<Block::Hash>,
	/// Number of the first block in the range.
	pub first_number: NumberFor<Block>,
	/// Blocks subrange ([begin; end) indices within `hashes`) where we should read keys at
	/// each state to get changes.
	pub unfiltered_range: Range<usize>,
	/// Blocks subrange ([begin; end) indices within `hashes`) where we could pre-filter
	/// blocks-with-changes by using changes tries.
	pub filtered_range: Option<Range<usize>>,
}
/// State API backend for full nodes. Reads all the data from the local database.
pub struct FullState<B, E, Block: BlockT, RA> {
	/// Substrate client.
	client: Arc<Client<B, E, Block, RA>>,
	/// Current subscriptions.
	subscriptions: Subscriptions,
}
impl<B, E, Block: BlockT, RA> FullState<B, E, Block, RA>
	where
		Block: BlockT<Hash=H256> + 'static,
		B: Backend<Block, Blake2Hasher> + Send + Sync + 'static,
		E: CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static + Clone,
{
	/// Create new state API backend for full nodes.
	pub fn new(client: Arc<Client<B, E, Block, RA>>, subscriptions: Subscriptions) -> Self {
		Self { client, subscriptions }
	}

	/// Returns given block hash or best block hash if None is passed.
	fn block_or_best(&self, hash: Option<Block::Hash>) -> ClientResult<Block::Hash> {
		crate::helpers::unwrap_or_else(|| Ok(self.client.info().chain.best_hash), hash)
	}

	/// Splits the `query_storage` block range into 'filtered' and 'unfiltered' subranges.
	/// Blocks that contain changes within filtered subrange could be filtered using changes tries.
	/// Blocks that contain changes within unfiltered subrange must be filtered manually.
	fn split_query_storage_range(
		&self,
		from: Block::Hash,
		to: Option<Block::Hash>
	) -> Result<QueryStorageRange<Block>> {
		let to = self.block_or_best(to).map_err(client_err)?;
		let from_hdr = self.client.header(&BlockId::hash(from)).map_err(client_err)?;
		let to_hdr = self.client.header(&BlockId::hash(to)).map_err(client_err)?;
		match (from_hdr, to_hdr) {
			(Some(ref from), Some(ref to)) if from.number() <= to.number() => {
				// check if we can get from `to` to `from` by going through parent_hashes.
				let from_number = *from.number();
				let blocks = {
					// walk `to -> from` by parent hashes, collecting hashes newest-first
					let mut blocks = vec![to.hash()];
					let mut last = to.clone();
					while *last.number() > from_number {
						let hdr = self.client
							.header(&BlockId::hash(*last.parent_hash()))
							.map_err(client_err)?;
						if let Some(hdr) = hdr {
							blocks.push(hdr.hash());
							last = hdr;
						} else {
							return Err(invalid_block_range(
								Some(from),
								Some(to),
								format!("Parent of {} ({}) not found", last.number(), last.hash()),
							))
						}
					}
					// if the walk didn't end exactly at `from`, the two blocks are on
					// different chains => the requested range is invalid
					if last.hash() != from.hash() {
						return Err(invalid_block_range(
							Some(from),
							Some(to),
							format!("Expected to reach `from`, got {} ({})", last.number(), last.hash()),
						))
					}
					// reorder to oldest-first, as expected by QueryStorageRange::hashes
					blocks.reverse();
					blocks
				};
				// check if we can filter blocks-with-changes from some (sub)range using changes tries
				let changes_trie_range = self.client
					.max_key_changes_range(from_number, BlockId::Hash(to.hash()))
					.map_err(client_err)?;
				let filtered_range_begin = changes_trie_range
					.map(|(begin, _)| (begin - from_number).saturated_into::<usize>());
				let (unfiltered_range, filtered_range) = split_range(blocks.len(), filtered_range_begin);
				Ok(QueryStorageRange {
					hashes: blocks,
					first_number: from_number,
					unfiltered_range,
					filtered_range,
				})
			},
			(from, to) => Err(
				invalid_block_range(from.as_ref(), to.as_ref(), "Invalid range or unknown block".into())
			),
		}
	}

	/// Iterates through range.unfiltered_range and check each block for changes of keys' values.
	fn query_storage_unfiltered(
		&self,
		range: &QueryStorageRange<Block>,
		keys: &[StorageKey],
		last_values: &mut HashMap<StorageKey, Option<StorageData>>,
		changes: &mut Vec<StorageChangeSet<Block::Hash>>,
	) -> Result<()> {
		for block in range.unfiltered_range.start..range.unfiltered_range.end {
			let block_hash = range.hashes[block].clone();
			let mut block_changes = StorageChangeSet { block: block_hash.clone(), changes: Vec::new() };
			let id = BlockId::hash(block_hash);
			for key in keys {
				let (has_changed, data) = {
					let curr_data = self.client.storage(&id, key).map_err(client_err)?;
					match last_values.get(key) {
						Some(prev_data) => (curr_data != *prev_data, curr_data),
						// key seen for the first time => always report its value
						None => (true, curr_data),
					}
				};
				if has_changed {
					block_changes.changes.push((key.clone(), data.clone()));
				}
				// remember the latest value so the filtered pass can diff against it
				last_values.insert(key.clone(), data);
			}
			if !block_changes.changes.is_empty() {
				changes.push(block_changes);
			}
		}
		Ok(())
	}

	/// Iterates through all blocks that are changing keys within range.filtered_range and collects these changes.
	fn query_storage_filtered(
		&self,
		range: &QueryStorageRange<Block>,
		keys: &[StorageKey],
		last_values: &HashMap<StorageKey, Option<StorageData>>,
		changes: &mut Vec<StorageChangeSet<Block::Hash>>,
	) -> Result<()> {
		let (begin, end) = match range.filtered_range {
			Some(ref filtered_range) => (
				range.first_number + filtered_range.start.saturated_into(),
				BlockId::Hash(range.hashes[filtered_range.end - 1].clone())
			),
			// nothing can be pre-filtered via changes tries => nothing to do here
			None => return Ok(()),
		};
		// BTreeMap keeps change sets ordered by block number when extended into `changes`
		let mut changes_map: BTreeMap<NumberFor<Block>, StorageChangeSet<Block::Hash>> = BTreeMap::new();
		for key in keys {
			let mut last_block = None;
			let mut last_value = last_values.get(key).cloned().unwrap_or_default();
			let key_changes = self.client.key_changes(begin, end, key).map_err(client_err)?;
			// key_changes is iterated in reverse; NOTE(review): this appears to make
			// `last_value` track the value of the previously-visited (higher) block — confirm
			for (block, _) in key_changes.into_iter().rev() {
				if last_block == Some(block) {
					continue;
				}
				let block_hash = range.hashes[(block - range.first_number).saturated_into::<usize>()].clone();
				let id = BlockId::Hash(block_hash);
				let value_at_block = self.client.storage(&id, key).map_err(client_err)?;
				// skip blocks where the value did not actually change
				if last_value == value_at_block {
					continue;
				}
				changes_map.entry(block)
					.or_insert_with(|| StorageChangeSet { block: block_hash, changes: Vec::new() })
					.changes.push((key.clone(), value_at_block.clone()));
				last_block = Some(block);
				last_value = value_at_block;
			}
		}
		if let Some(additional_capacity) = changes_map.len().checked_sub(changes.len()) {
			changes.reserve(additional_capacity);
		}
		changes.extend(changes_map.into_iter().map(|(_, cs)| cs));
		Ok(())
	}
}
impl<B, E, Block, RA> StateBackend<B, E, Block, RA> for FullState<B, E, Block, RA>
	where
		Block: BlockT<Hash=H256> + 'static,
		B: Backend<Block, Blake2Hasher> + Send + Sync + 'static,
		E: CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static + Clone,
		RA: Send + Sync + 'static,
		Client<B, E, Block, RA>: ProvideRuntimeApi,
		<Client<B, E, Block, RA> as ProvideRuntimeApi>::Api: Metadata<Block>,
{
	fn client(&self) -> &Arc<Client<B, E, Block, RA>> {
		&self.client
	}

	fn subscriptions(&self) -> &Subscriptions {
		&self.subscriptions
	}

	// NOTE: all methods below answer from the local database synchronously;
	// `result(...)` merely wraps the already-computed outcome into a future.
	fn call(
		&self,
		block: Option<Block::Hash>,
		method: String,
		call_data: Bytes,
	) -> FutureResult<Bytes> {
		Box::new(result(
			self.block_or_best(block)
				.and_then(|block| self.client.executor()
					.call(
						&BlockId::Hash(block),
						&method,
						&*call_data,
						ExecutionStrategy::NativeElseWasm,
						NeverOffchainExt::new(),
					)
					.map(Into::into))
				.map_err(client_err)))
	}

	fn storage_keys(
		&self,
		block: Option<Block::Hash>,
		prefix: StorageKey,
	) -> FutureResult<Vec<StorageKey>> {
		Box::new(result(
			self.block_or_best(block)
				.and_then(|block| self.client.storage_keys(&BlockId::Hash(block), &prefix))
				.map_err(client_err)))
	}

	fn storage(
		&self,
		block: Option<Block::Hash>,
		key: StorageKey,
	) -> FutureResult<Option<StorageData>> {
		Box::new(result(
			self.block_or_best(block)
				.and_then(|block| self.client.storage(&BlockId::Hash(block), &key))
				.map_err(client_err)))
	}

	fn storage_hash(
		&self,
		block: Option<Block::Hash>,
		key: StorageKey,
	) -> FutureResult<Option<Block::Hash>> {
		Box::new(result(
			self.block_or_best(block)
				.and_then(|block| self.client.storage_hash(&BlockId::Hash(block), &key))
				.map_err(client_err)))
	}

	fn child_storage_keys(
		&self,
		block: Option<Block::Hash>,
		child_storage_key: StorageKey,
		prefix: StorageKey,
	) -> FutureResult<Vec<StorageKey>> {
		Box::new(result(
			self.block_or_best(block)
				.and_then(|block| self.client.child_storage_keys(&BlockId::Hash(block), &child_storage_key, &prefix))
				.map_err(client_err)))
	}

	fn child_storage(
		&self,
		block: Option<Block::Hash>,
		child_storage_key: StorageKey,
		key: StorageKey,
	) -> FutureResult<Option<StorageData>> {
		Box::new(result(
			self.block_or_best(block)
				.and_then(|block| self.client.child_storage(&BlockId::Hash(block), &child_storage_key, &key))
				.map_err(client_err)))
	}

	fn child_storage_hash(
		&self,
		block: Option<Block::Hash>,
		child_storage_key: StorageKey,
		key: StorageKey,
	) -> FutureResult<Option<Block::Hash>> {
		Box::new(result(
			self.block_or_best(block)
				.and_then(|block| self.client.child_storage_hash(&BlockId::Hash(block), &child_storage_key, &key))
				.map_err(client_err)))
	}

	fn metadata(&self, block: Option<Block::Hash>) -> FutureResult<Bytes> {
		Box::new(result(
			self.block_or_best(block)
				.and_then(|block| self.client.runtime_api().metadata(&BlockId::Hash(block)).map(Into::into))
				.map_err(client_err)))
	}

	fn runtime_version(&self, block: Option<Block::Hash>) -> FutureResult<RuntimeVersion> {
		Box::new(result(
			self.block_or_best(block)
				.and_then(|block| self.client.runtime_version_at(&BlockId::Hash(block)))
				.map_err(client_err)))
	}

	fn query_storage(
		&self,
		from: Block::Hash,
		to: Option<Block::Hash>,
		keys: Vec<StorageKey>,
	) -> FutureResult<Vec<StorageChangeSet<Block::Hash>>> {
		// two passes: the unfiltered one reads every block in its subrange, the
		// filtered one uses changes tries to visit only blocks with changes
		let call_fn = move || {
			let range = self.split_query_storage_range(from, to)?;
			let mut changes = Vec::new();
			let mut last_values = HashMap::new();
			self.query_storage_unfiltered(&range, &keys, &mut last_values, &mut changes)?;
			self.query_storage_filtered(&range, &keys, &last_values, &mut changes)?;
			Ok(changes)
		};
		// `call_fn` runs eagerly here; only the finished result is boxed as a future
		Box::new(result(call_fn()))
	}
}
/// Splits passed range into two subranges where:
/// - first range has at least one element in it;
/// - second range (optionally) starts at given `middle` element.
pub(crate) fn split_range(size: usize, middle: Option<usize>) -> (Range<usize>, Option<Range<usize>>) {
	// check if we can filter blocks-with-changes from some (sub)range using changes tries
	let filtered_begin = if let Some(middle) = middle {
		if middle != 0 {
			// some of required changes tries are pruned => use available tries
			Some(middle)
		} else if size > 1 {
			// all required changes tries are available, but we still want values at first block
			// => do 'unfiltered' read for the first block and 'filtered' for the rest
			Some(1)
		} else {
			// range contains single element => do not use changes tries
			None
		}
	} else {
		// changes tries are not available => do 'unfiltered' read for the whole range
		None
	};
	let unfiltered = 0..filtered_begin.unwrap_or(size);
	let filtered = filtered_begin.map(|begin| begin..size);
	(unfiltered, filtered)
}
fn invalid_block_range<H: Header>(from: Option<&H>, to: Option<&H>, reason: String) -> Error {
let to_string = |x: Option<&H>| match x {
None => "unknown hash".into(),
Some(h) => format!("{} ({})", h.number(), h.hash()),
};
Error::InvalidBlockRange {
from: to_string(from),
to: to_string(to),
details: reason,
}
}
+283
View File
@@ -0,0 +1,283 @@
// Copyright 2019 Parity Technologies (UK) Ltd.
// This file is part of Substrate.
// Substrate is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Substrate is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
//! State API backend for light nodes.
use std::sync::Arc;
use codec::Decode;
use futures03::{future::{ready, Either}, FutureExt, TryFutureExt};
use hash_db::Hasher;
use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId};
use rpc::{
Result as RpcResult,
futures::future::{result, Future},
};
use api::Subscriptions;
use client::{
Client, CallExecutor, backend::Backend,
error::Error as ClientError,
light::{
blockchain::{future_header, RemoteBlockchain},
fetcher::{Fetcher, RemoteCallRequest, RemoteReadRequest, RemoteReadChildRequest},
},
};
use primitives::{
H256, Blake2Hasher, Bytes, OpaqueMetadata,
storage::{StorageKey, StorageData, StorageChangeSet},
};
use runtime_version::RuntimeVersion;
use sr_primitives::{
generic::BlockId,
traits::{Block as BlockT, Header as HeaderT},
};
use super::{StateBackend, error::{FutureResult, Error}, client_err};
/// State API backend for light nodes. Answers queries by issuing on-demand
/// requests through the fetcher.
pub struct LightState<Block: BlockT, F: Fetcher<Block>, B, E, RA> {
	/// Substrate light client.
	client: Arc<Client<B, E, Block, RA>>,
	/// Current subscriptions.
	subscriptions: Subscriptions,
	/// Remote blockchain used to resolve headers (see `resolve_header`).
	remote_blockchain: Arc<dyn RemoteBlockchain<Block>>,
	/// On-demand fetcher used to issue remote call/read requests.
	fetcher: Arc<F>,
}
impl<Block: BlockT, F: Fetcher<Block> + 'static, B, E, RA> LightState<Block, F, B, E, RA>
	where
		Block: BlockT<Hash=H256>,
		B: Backend<Block, Blake2Hasher> + Send + Sync + 'static,
		E: CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static + Clone,
		RA: Send + Sync + 'static,
{
	/// Create new state API backend for light nodes.
	pub fn new(
		client: Arc<Client<B, E, Block, RA>>,
		subscriptions: Subscriptions,
		remote_blockchain: Arc<dyn RemoteBlockchain<Block>>,
		fetcher: Arc<F>,
	) -> Self {
		Self { client, subscriptions, remote_blockchain, fetcher, }
	}

	/// Returns given block hash or best block hash if None is passed.
	fn block_or_best(&self, hash: Option<Block::Hash>) -> Block::Hash {
		hash.unwrap_or_else(|| self.client.info().chain.best_hash)
	}

	/// Resolve header by hash.
	///
	/// Fetches the header via the remote blockchain (defaulting to the best
	/// block) and fails with `UnknownBlock` when it cannot be found.
	fn resolve_header(
		&self,
		block: Option<Block::Hash>,
	) -> impl std::future::Future<Output = Result<Block::Header, Error>> {
		let block = self.block_or_best(block);
		let maybe_header = future_header(
			&*self.remote_blockchain,
			&*self.fetcher,
			BlockId::Hash(block),
		);
		maybe_header.then(move |result|
			ready(result.and_then(|maybe_header|
				// `None` here means the block is unknown to the light client
				maybe_header.ok_or(ClientError::UnknownBlock(format!("{}", block)))
			).map_err(client_err)),
		)
	}
}
// State API implementation for light nodes: calls and reads are proxied to
// remote full nodes via the fetcher; operations that require iterating the
// full state (key enumeration, query_storage) are rejected, and subscriptions
// are not supported.
impl<Block, F, B, E, RA> StateBackend<B, E, Block, RA> for LightState<Block, F, B, E, RA>
	where
		Block: BlockT<Hash=H256>,
		B: Backend<Block, Blake2Hasher> + Send + Sync + 'static,
		E: CallExecutor<Block, Blake2Hasher> + Send + Sync + 'static + Clone,
		RA: Send + Sync + 'static,
		F: Fetcher<Block> + 'static
{
	fn client(&self) -> &Arc<Client<B, E, Block, RA>> {
		&self.client
	}

	fn subscriptions(&self) -> &Subscriptions {
		&self.subscriptions
	}

	// Execute a runtime call at the given (or best) block by first resolving
	// the header, then issuing a remote call request to a full node.
	fn call(
		&self,
		block: Option<Block::Hash>,
		method: String,
		call_data: Bytes,
	) -> FutureResult<Bytes> {
		let fetcher = self.fetcher.clone();
		let call_result = self.resolve_header(block)
			.then(move |result| match result {
				Ok(header) => Either::Left(fetcher.remote_call(RemoteCallRequest {
					block: header.hash(),
					header,
					method,
					call_data: call_data.0,
					retry_count: Default::default(),
				}).then(|result| ready(result.map(Bytes).map_err(client_err)))),
				Err(error) => Either::Right(ready(Err(error))),
			});
		// Bridge the futures 0.3 future into the futures 0.1 type expected by
		// the RPC layer.
		Box::new(call_result.boxed().compat())
	}

	// Enumerating storage keys requires full state iteration, which a light
	// client cannot perform.
	fn storage_keys(
		&self,
		_block: Option<Block::Hash>,
		_prefix: StorageKey,
	) -> FutureResult<Vec<StorageKey>> {
		Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient))))
	}

	// Read a single storage value via a remote (proved) read request.
	fn storage(
		&self,
		block: Option<Block::Hash>,
		key: StorageKey,
	) -> FutureResult<Option<StorageData>> {
		let fetcher = self.fetcher.clone();
		let storage = self.resolve_header(block)
			.then(move |result| match result {
				Ok(header) => Either::Left(fetcher.remote_read(RemoteReadRequest {
					block: header.hash(),
					header,
					key: key.0,
					retry_count: Default::default(),
				}).then(|result| ready(result.map(|data| data.map(StorageData)).map_err(client_err)))),
				Err(error) => Either::Right(ready(Err(error))),
			});
		Box::new(storage.boxed().compat())
	}

	// Hash of a storage value: fetch the value remotely, then hash it locally
	// with Blake2 (the trie value hash).
	fn storage_hash(
		&self,
		block: Option<Block::Hash>,
		key: StorageKey,
	) -> FutureResult<Option<Block::Hash>> {
		Box::new(self
			.storage(block, key)
			.and_then(|maybe_storage|
				result(Ok(maybe_storage.map(|storage| Blake2Hasher::hash(&storage.0))))
			)
		)
	}

	// Child-storage key enumeration is also unavailable without full state.
	fn child_storage_keys(
		&self,
		_block: Option<Block::Hash>,
		_child_storage_key: StorageKey,
		_prefix: StorageKey,
	) -> FutureResult<Vec<StorageKey>> {
		Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient))))
	}

	// Read a single child-storage value via a remote (proved) child read request.
	fn child_storage(
		&self,
		block: Option<Block::Hash>,
		child_storage_key: StorageKey,
		key: StorageKey,
	) -> FutureResult<Option<StorageData>> {
		let fetcher = self.fetcher.clone();
		let child_storage = self.resolve_header(block)
			.then(move |result| match result {
				Ok(header) => Either::Left(fetcher.remote_read_child(RemoteReadChildRequest {
					block: header.hash(),
					header,
					storage_key: child_storage_key.0,
					key: key.0,
					retry_count: Default::default(),
				}).then(|result| ready(result.map(|data| data.map(StorageData)).map_err(client_err)))),
				Err(error) => Either::Right(ready(Err(error))),
			});
		Box::new(child_storage.boxed().compat())
	}

	// Same scheme as `storage_hash`: remote fetch, local Blake2 hash.
	fn child_storage_hash(
		&self,
		block: Option<Block::Hash>,
		child_storage_key: StorageKey,
		key: StorageKey,
	) -> FutureResult<Option<Block::Hash>> {
		Box::new(self
			.child_storage(block, child_storage_key, key)
			.and_then(|maybe_storage|
				result(Ok(maybe_storage.map(|storage| Blake2Hasher::hash(&storage.0))))
			)
		)
	}

	// Runtime metadata is obtained through a remote `Metadata_metadata` call
	// and decoded from its SCALE-encoded opaque form.
	fn metadata(&self, block: Option<Block::Hash>) -> FutureResult<Bytes> {
		let metadata = self.call(block, "Metadata_metadata".into(), Bytes(Vec::new()))
			.and_then(|metadata| OpaqueMetadata::decode(&mut &metadata.0[..])
				.map(Into::into)
				.map_err(|decode_err| client_err(ClientError::CallResultDecode(
					"Unable to decode metadata",
					decode_err,
				))));
		Box::new(metadata)
	}

	// Runtime version is obtained through a remote `Core_version` call.
	fn runtime_version(&self, block: Option<Block::Hash>) -> FutureResult<RuntimeVersion> {
		let version = self.call(block, "Core_version".into(), Bytes(Vec::new()))
			.and_then(|version| Decode::decode(&mut &version.0[..])
				.map_err(|_| client_err(ClientError::VersionInvalid))
			);
		Box::new(version)
	}

	// Querying historical storage changes needs access to the changes trie /
	// full state, which the light client does not have.
	fn query_storage(
		&self,
		_from: Block::Hash,
		_to: Option<Block::Hash>,
		_keys: Vec<StorageKey>,
	) -> FutureResult<Vec<StorageChangeSet<Block::Hash>>> {
		Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient))))
	}

	// Storage subscriptions are not supported on light nodes: the subscriber
	// is dropped without being assigned a subscription id.
	fn subscribe_storage(
		&self,
		_meta: crate::metadata::Metadata,
		_subscriber: Subscriber<StorageChangeSet<Block::Hash>>,
		_keys: Option<Vec<StorageKey>>
	) {
	}

	// No storage subscriptions exist, so there is never anything to remove.
	fn unsubscribe_storage(
		&self,
		_meta: Option<crate::metadata::Metadata>,
		_id: SubscriptionId,
	) -> RpcResult<bool> {
		Ok(false)
	}

	// Runtime-version subscriptions are likewise unsupported: the subscriber
	// is dropped without being assigned a subscription id.
	fn subscribe_runtime_version(
		&self,
		_meta: crate::metadata::Metadata,
		_subscriber: Subscriber<RuntimeVersion>,
	) {
	}

	// No runtime-version subscriptions exist, so unsubscription always
	// reports `false`.
	fn unsubscribe_runtime_version(
		&self,
		_meta: Option<crate::metadata::Metadata>,
		_id: SubscriptionId,
	) -> RpcResult<bool> {
		Ok(false)
	}
}
+28 -22
View File
@@ -15,9 +15,12 @@
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
use super::*;
use super::state_full::split_range;
use self::error::Error;
use std::sync::Arc;
use assert_matches::assert_matches;
use futures::stream::Stream;
use primitives::storage::well_known_keys;
use sr_io::blake2_256;
use test_client::{
@@ -39,21 +42,22 @@ fn should_return_storage() {
.add_extra_child_storage(STORAGE_KEY.to_vec(), KEY.to_vec(), CHILD_VALUE.to_vec())
.build();
let genesis_hash = client.genesis_hash();
let client = State::new(Arc::new(client), Subscriptions::new(Arc::new(core.executor())));
let client = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor())));
let key = StorageKey(KEY.to_vec());
let storage_key = StorageKey(STORAGE_KEY.to_vec());
assert_eq!(
client.storage(key.clone(), Some(genesis_hash).into())
client.storage(key.clone(), Some(genesis_hash).into()).wait()
.map(|x| x.map(|x| x.0.len())).unwrap().unwrap() as usize,
VALUE.len(),
);
assert_matches!(
client.storage_hash(key.clone(), Some(genesis_hash).into()).map(|x| x.is_some()),
client.storage_hash(key.clone(), Some(genesis_hash).into()).wait()
.map(|x| x.is_some()),
Ok(true)
);
assert_eq!(
client.storage_size(key.clone(), None).unwrap().unwrap() as usize,
client.storage_size(key.clone(), None).wait().unwrap().unwrap() as usize,
VALUE.len(),
);
assert_eq!(
@@ -71,22 +75,22 @@ fn should_return_child_storage() {
.add_child_storage("test", "key", vec![42_u8])
.build());
let genesis_hash = client.genesis_hash();
let client = State::new(client, Subscriptions::new(Arc::new(core.executor())));
let client = new_full(client, Subscriptions::new(Arc::new(core.executor())));
let child_key = StorageKey(well_known_keys::CHILD_STORAGE_KEY_PREFIX.iter().chain(b"test").cloned().collect());
let key = StorageKey(b"key".to_vec());
assert_matches!(
client.child_storage(child_key.clone(), key.clone(), Some(genesis_hash).into()),
client.child_storage(child_key.clone(), key.clone(), Some(genesis_hash).into()).wait(),
Ok(Some(StorageData(ref d))) if d[0] == 42 && d.len() == 1
);
assert_matches!(
client.child_storage_hash(child_key.clone(), key.clone(), Some(genesis_hash).into())
.map(|x| x.is_some()),
.wait().map(|x| x.is_some()),
Ok(true)
);
assert_matches!(
client.child_storage_size(child_key.clone(), key.clone(), None),
client.child_storage_size(child_key.clone(), key.clone(), None).wait(),
Ok(Some(1))
);
}
@@ -96,10 +100,10 @@ fn should_call_contract() {
let core = tokio::runtime::Runtime::new().unwrap();
let client = Arc::new(test_client::new());
let genesis_hash = client.genesis_hash();
let client = State::new(client, Subscriptions::new(Arc::new(core.executor())));
let client = new_full(client, Subscriptions::new(Arc::new(core.executor())));
assert_matches!(
client.call("balanceOf".into(), Bytes(vec![1,2,3]), Some(genesis_hash).into()),
client.call("balanceOf".into(), Bytes(vec![1,2,3]), Some(genesis_hash).into()).wait(),
Err(Error::Client(_))
)
}
@@ -111,21 +115,22 @@ fn should_notify_about_storage_changes() {
let (subscriber, id, transport) = Subscriber::new_test("test");
{
let api = State::new(Arc::new(test_client::new()), Subscriptions::new(Arc::new(remote)));
let client = Arc::new(test_client::new());
let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote)));
api.subscribe_storage(Default::default(), subscriber, None.into());
// assert id assigned
assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1))));
let mut builder = api.client.new_block(Default::default()).unwrap();
let mut builder = client.new_block(Default::default()).unwrap();
builder.push_transfer(runtime::Transfer {
from: AccountKeyring::Alice.into(),
to: AccountKeyring::Ferdie.into(),
amount: 42,
nonce: 0,
}).unwrap();
api.client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
}
// assert notification sent to transport
@@ -142,7 +147,8 @@ fn should_send_initial_storage_changes_and_notifications() {
let (subscriber, id, transport) = Subscriber::new_test("test");
{
let api = State::new(Arc::new(test_client::new()), Subscriptions::new(Arc::new(remote)));
let client = Arc::new(test_client::new());
let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote)));
let alice_balance_key = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Alice.into()));
@@ -153,14 +159,14 @@ fn should_send_initial_storage_changes_and_notifications() {
// assert id assigned
assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1))));
let mut builder = api.client.new_block(Default::default()).unwrap();
let mut builder = client.new_block(Default::default()).unwrap();
builder.push_transfer(runtime::Transfer {
from: AccountKeyring::Alice.into(),
to: AccountKeyring::Ferdie.into(),
amount: 42,
nonce: 0,
}).unwrap();
api.client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
client.import(BlockOrigin::Own, builder.bake().unwrap()).unwrap();
}
// assert initial values sent to transport
@@ -177,7 +183,7 @@ fn should_send_initial_storage_changes_and_notifications() {
fn should_query_storage() {
fn run_tests(client: Arc<TestClient>) {
let core = tokio::runtime::Runtime::new().unwrap();
let api = State::new(client.clone(), Subscriptions::new(Arc::new(core.executor())));
let api = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor())));
let add_block = |nonce| {
let mut builder = client.new_block(Default::default()).unwrap();
@@ -229,7 +235,7 @@ fn should_query_storage() {
Some(block1_hash).into(),
);
assert_eq!(result.unwrap(), expected);
assert_eq!(result.wait().unwrap(), expected);
// Query all changes
let result = api.query_storage(
@@ -246,7 +252,7 @@ fn should_query_storage() {
(StorageKey(vec![5]), Some(StorageData(vec![1]))),
],
});
assert_eq!(result.unwrap(), expected);
assert_eq!(result.wait().unwrap(), expected);
}
run_tests(Arc::new(test_client::new()));
@@ -268,7 +274,7 @@ fn should_return_runtime_version() {
let core = tokio::runtime::Runtime::new().unwrap();
let client = Arc::new(test_client::new());
let api = State::new(client.clone(), Subscriptions::new(Arc::new(core.executor())));
let api = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor())));
let result = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\
\"specVersion\":1,\"implVersion\":1,\"apis\":[[\"0xdf6acb689907609b\",2],\
@@ -276,7 +282,7 @@ fn should_return_runtime_version() {
[\"0xc6e9a76309f39b09\",1],[\"0xdd718d5cc53262d4\",1],[\"0xcbca25e39f142387\",1],\
[\"0xf78b278be53f454c\",1],[\"0xab3c0572291feb8b\",1]]}";
let runtime_version = api.runtime_version(None.into()).unwrap();
let runtime_version = api.runtime_version(None.into()).wait().unwrap();
let serialized = serde_json::to_string(&runtime_version).unwrap();
assert_eq!(serialized, result);
@@ -291,7 +297,7 @@ fn should_notify_on_runtime_version_initially() {
{
let client = Arc::new(test_client::new());
let api = State::new(client.clone(), Subscriptions::new(Arc::new(core.executor())));
let api = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor())));
api.subscribe_runtime_version(Default::default(), subscriber);