feat: initialize Kurdistan SDK - independent fork of Polkadot SDK

This commit is contained in:
2025-12-13 15:44:15 +03:00
commit 286de54384
6841 changed files with 1848356 additions and 0 deletions
+216
View File
@@ -0,0 +1,216 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Substrate block-author/full-node API.
#[cfg(test)]
mod tests;
use self::error::{Error, Result};
use crate::{
utils::{spawn_subscription_task, BoundedVecDeque, PendingSubscription},
SubscriptionTaskExecutor,
};
use codec::{Decode, Encode};
use jsonrpsee::{core::async_trait, types::ErrorObject, Extensions, PendingSubscriptionSink};
use sc_rpc_api::check_if_safe;
use sc_transaction_pool_api::{
error::IntoPoolError, BlockHash, InPoolTransaction, TransactionFor, TransactionPool,
TransactionSource, TxHash, TxInvalidityReportMap,
};
use sp_api::{ApiExt, ProvideRuntimeApi};
use sp_blockchain::HeaderBackend;
use sp_core::Bytes;
use sp_keystore::{KeystoreExt, KeystorePtr};
use sp_runtime::traits::Block as BlockT;
use sp_session::SessionKeys;
use std::sync::Arc;
/// Re-export the API for backward compatibility.
pub use sc_rpc_api::author::*;
/// Authoring API: RPC handler for submitting extrinsics and managing session keys.
pub struct Author<P, Client> {
	/// Substrate client, used to query chain info and the runtime API.
	client: Arc<Client>,
	/// Transactions pool that extrinsics are submitted to.
	pool: Arc<P>,
	/// The key store backing the key-management RPCs (insert/rotate/has key).
	keystore: KeystorePtr,
	/// Executor to spawn subscriptions.
	executor: SubscriptionTaskExecutor,
}
impl<P, Client> Author<P, Client> {
/// Create new instance of Authoring API.
pub fn new(
client: Arc<Client>,
pool: Arc<P>,
keystore: KeystorePtr,
executor: SubscriptionTaskExecutor,
) -> Self {
Author { client, pool, keystore, executor }
}
}
/// Currently we treat all RPC transactions as externals.
///
/// Possibly in the future we could allow opt-in for special treatment
/// of such transactions, so that the block authors can inject
/// some unique transactions via RPC and have them included in the pool.
const TX_SOURCE: TransactionSource = TransactionSource::External;
#[async_trait]
impl<P, Client> AuthorApiServer<TxHash<P>, BlockHash<P>> for Author<P, Client>
where
P: TransactionPool + Sync + Send + 'static,
Client: HeaderBackend<P::Block> + ProvideRuntimeApi<P::Block> + Send + Sync + 'static,
Client::Api: SessionKeys<P::Block>,
P::Hash: Unpin,
<P::Block as BlockT>::Hash: Unpin,
{
async fn submit_extrinsic(&self, ext: Bytes) -> Result<TxHash<P>> {
let xt = match Decode::decode(&mut &ext[..]) {
Ok(xt) => xt,
Err(err) => return Err(Error::Client(Box::new(err)).into()),
};
let best_block_hash = self.client.info().best_hash;
self.pool.submit_one(best_block_hash, TX_SOURCE, xt).await.map_err(|e| {
e.into_pool_error()
.map(|e| Error::Pool(e))
.unwrap_or_else(|e| Error::Verification(Box::new(e)))
.into()
})
}
fn insert_key(
&self,
ext: &Extensions,
key_type: String,
suri: String,
public: Bytes,
) -> Result<()> {
check_if_safe(ext)?;
let key_type = key_type.as_str().try_into().map_err(|_| Error::BadKeyType)?;
self.keystore
.insert(key_type, &suri, &public[..])
.map_err(|_| Error::KeystoreUnavailable)?;
Ok(())
}
fn rotate_keys(&self, ext: &Extensions) -> Result<Bytes> {
check_if_safe(ext)?;
let best_block_hash = self.client.info().best_hash;
let mut runtime_api = self.client.runtime_api();
runtime_api.register_extension(KeystoreExt::from(self.keystore.clone()));
runtime_api
.generate_session_keys(best_block_hash, None)
.map(Into::into)
.map_err(|api_err| Error::Client(Box::new(api_err)).into())
}
fn has_session_keys(&self, ext: &Extensions, session_keys: Bytes) -> Result<bool> {
check_if_safe(ext)?;
let best_block_hash = self.client.info().best_hash;
let keys = self
.client
.runtime_api()
.decode_session_keys(best_block_hash, session_keys.to_vec())
.map_err(|e| Error::Client(Box::new(e)))?
.ok_or(Error::InvalidSessionKeys)?;
Ok(self.keystore.has_keys(&keys))
}
fn has_key(&self, ext: &Extensions, public_key: Bytes, key_type: String) -> Result<bool> {
check_if_safe(ext)?;
let key_type = key_type.as_str().try_into().map_err(|_| Error::BadKeyType)?;
Ok(self.keystore.has_keys(&[(public_key.to_vec(), key_type)]))
}
fn pending_extrinsics(&self) -> Result<Vec<Bytes>> {
Ok(self.pool.ready().map(|tx| tx.data().encode().into()).collect())
}
async fn remove_extrinsic(
&self,
ext: &Extensions,
bytes_or_hash: Vec<hash::ExtrinsicOrHash<TxHash<P>>>,
) -> Result<Vec<TxHash<P>>> {
check_if_safe(ext)?;
let hashes = bytes_or_hash
.into_iter()
.map(|x| match x {
hash::ExtrinsicOrHash::Hash(h) => Ok((h, None)),
hash::ExtrinsicOrHash::Extrinsic(bytes) => {
let xt = Decode::decode(&mut &bytes[..])?;
Ok((self.pool.hash_of(&xt), None))
},
})
.collect::<Result<TxInvalidityReportMap<TxHash<P>>>>()?;
Ok(self
.pool
.report_invalid(None, hashes)
.await
.into_iter()
.map(|tx| tx.hash().clone())
.collect())
}
fn watch_extrinsic(&self, pending: PendingSubscriptionSink, xt: Bytes) {
let best_block_hash = self.client.info().best_hash;
let dxt = match TransactionFor::<P>::decode(&mut &xt[..]).map_err(|e| Error::from(e)) {
Ok(dxt) => dxt,
Err(e) => {
spawn_subscription_task(&self.executor, pending.reject(e));
return;
},
};
let pool = self.pool.clone();
let fut = async move {
let submit =
pool.submit_and_watch(best_block_hash, TX_SOURCE, dxt).await.map_err(|e| {
e.into_pool_error()
.map(error::Error::from)
.unwrap_or_else(|e| error::Error::Verification(Box::new(e)))
});
let stream = match submit {
Ok(stream) => stream,
Err(err) => {
let _ = pending.reject(ErrorObject::from(err)).await;
return;
},
};
PendingSubscription::from(pending)
.pipe_from_stream(stream, BoundedVecDeque::default())
.await;
};
spawn_subscription_task(&self.executor, fut);
}
}
+330
View File
@@ -0,0 +1,330 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use super::*;
use crate::testing::{test_executor, timeout_secs};
use assert_matches::assert_matches;
use codec::Encode;
use jsonrpsee::{core::EmptyServerParams as EmptyParams, MethodsError as RpcError, RpcModule};
use sc_rpc_api::DenyUnsafe;
use sc_transaction_pool::{BasicPool, FullChainApi};
use sc_transaction_pool_api::TransactionStatus;
use sp_core::{
bytes::to_hex,
crypto::{ByteArray, Pair},
ed25519,
testing::{ED25519, SR25519},
H256,
};
use sp_crypto_hashing::blake2_256;
use sp_keystore::{testing::MemoryKeystore, Keystore};
use sp_runtime::Perbill;
use std::sync::Arc;
use substrate_test_runtime_client::{
self,
runtime::{Block, Extrinsic, ExtrinsicBuilder, SessionKeys, Transfer},
Backend, Client, DefaultTestClientBuilderExt, Sr25519Keyring, TestClientBuilderExt,
};
/// Build a signed transfer extrinsic moving the default amount from `sender`
/// to Bob at the given `nonce`.
fn uxt(sender: Sr25519Keyring, nonce: u64) -> Extrinsic {
	let transfer = Transfer {
		from: sender.into(),
		to: Sr25519Keyring::Bob.into(),
		amount: Default::default(),
		nonce,
	};
	ExtrinsicBuilder::new_transfer(transfer).build()
}
type FullTransactionPool = BasicPool<FullChainApi<Client<Backend>, Block>, Block>;
// Shared fixtures for the author RPC tests: a test client, an in-memory
// keystore, and a full transaction pool wired to that client.
struct TestSetup {
	pub client: Arc<Client<Backend>>,
	pub keystore: Arc<MemoryKeystore>,
	pub pool: Arc<FullTransactionPool>,
}
impl Default for TestSetup {
	/// Set up a fresh test client plus a full pool observing it, and an empty
	/// in-memory keystore.
	fn default() -> Self {
		let client = Arc::new(substrate_test_runtime_client::TestClientBuilder::new().build());
		let pool = Arc::from(BasicPool::new_full(
			Default::default(),
			true.into(),
			None,
			sp_core::testing::TaskExecutor::new(),
			client.clone(),
		));
		Self { client, keystore: Arc::new(MemoryKeystore::new()), pool }
	}
}
impl TestSetup {
	// Build an RPC module exposing this setup's author API.
	fn to_rpc(&self) -> RpcModule<Author<FullTransactionPool, Client<Backend>>> {
		let mut module = Author {
			client: self.client.clone(),
			pool: self.pool.clone(),
			keystore: self.keystore.clone(),
			executor: test_executor(),
		}
		.into_rpc();
		// Tests exercise unsafe RPCs (insertKey, rotateKeys, ...), so mark the
		// connection as trusted.
		module.extensions_mut().insert(DenyUnsafe::No);
		module
	}
	// Convenience: build an RPC module from a throwaway default setup.
	fn into_rpc() -> RpcModule<Author<FullTransactionPool, Client<Backend>>> {
		Self::default().to_rpc()
	}
}
// Submitting an extrinsic returns its blake2-256 hash; submitting the same one
// again is rejected by the pool with "Already Imported" (error code 1013).
#[tokio::test]
async fn author_submit_transaction_should_not_cause_error() {
	let api = TestSetup::into_rpc();
	let xt: Bytes = uxt(Sr25519Keyring::Alice, 1).encode().into();
	let extrinsic_hash: H256 = blake2_256(&xt).into();
	let response: H256 = api.call("author_submitExtrinsic", [xt.clone()]).await.unwrap();
	assert_eq!(response, extrinsic_hash);
	// Resubmitting the identical extrinsic must fail.
	assert_matches!(
		api.call::<_, H256>("author_submitExtrinsic", [xt]).await,
		Err(RpcError::JsonRpc(err)) if err.message().contains("Already Imported") && err.code() == 1013
	);
}
// Watching an extrinsic first reports `Ready`, then `Usurped` after a
// higher-priority replacement is submitted.
#[tokio::test]
async fn author_should_watch_extrinsic() {
	let api = TestSetup::into_rpc();
	let xt = to_hex(
		&ExtrinsicBuilder::new_call_with_priority(0)
			.signer(Sr25519Keyring::Alice.into())
			.build()
			.encode(),
		true,
	);
	let mut sub = api.subscribe_unbounded("author_submitAndWatchExtrinsic", [xt]).await.unwrap();
	let (tx, sub_id) = timeout_secs(10, sub.next::<TransactionStatus<H256, Block>>())
		.await
		.unwrap()
		.unwrap()
		.unwrap();
	assert_matches!(tx, TransactionStatus::Ready);
	assert_eq!(&sub_id, sub.subscription_id());
	// Replace the extrinsic and observe the subscription is notified.
	let (xt_replacement, xt_hash) = {
		let tx = ExtrinsicBuilder::new_call_with_priority(1)
			.signer(Sr25519Keyring::Alice.into())
			.build()
			.encode();
		let hash = blake2_256(&tx);
		(to_hex(&tx, true), hash)
	};
	let _ = api.call::<_, H256>("author_submitExtrinsic", [xt_replacement]).await.unwrap();
	let (tx, sub_id) = timeout_secs(10, sub.next::<TransactionStatus<H256, Block>>())
		.await
		.unwrap()
		.unwrap()
		.unwrap();
	assert_eq!(tx, TransactionStatus::Usurped(xt_hash.into()));
	assert_eq!(&sub_id, sub.subscription_id());
}
// Subscribing with an invalid extrinsic rejects the subscription with
// "Invalid Transaction" (error code 1010) instead of opening a stream.
#[tokio::test]
async fn author_should_return_watch_validation_error() {
	const METHOD: &'static str = "author_submitAndWatchExtrinsic";
	let invalid_xt = ExtrinsicBuilder::new_fill_block(Perbill::from_percent(100)).build();
	let api = TestSetup::into_rpc();
	let failed_sub = api.subscribe_unbounded(METHOD, [to_hex(&invalid_xt.encode(), true)]).await;
	assert_matches!(
		failed_sub,
		Err(RpcError::JsonRpc(err)) if err.message().contains("Invalid Transaction") && err.code() == 1010
	);
}
// `author_pendingExtrinsics` returns the encoded bytes of every ready transaction.
#[tokio::test]
async fn author_should_return_pending_extrinsics() {
	let api = TestSetup::into_rpc();
	let xt_bytes: Bytes = uxt(Sr25519Keyring::Alice, 0).encode().into();
	api.call::<_, H256>("author_submitExtrinsic", [to_hex(&xt_bytes, true)])
		.await
		.unwrap();
	let pending: Vec<Bytes> =
		api.call("author_pendingExtrinsics", EmptyParams::new()).await.unwrap();
	assert_eq!(pending, vec![xt_bytes]);
}
// `author_removeExtrinsic` accepts a mixed list of hashes and full encodings,
// and removing a lower-nonce transaction cascades to its dependants.
#[tokio::test]
async fn author_should_remove_extrinsics() {
	const METHOD: &'static str = "author_removeExtrinsic";
	let setup = TestSetup::default();
	let api = setup.to_rpc();
	// Submit three extrinsics, then remove two of them (will cause the third to be removed as well,
	// having a higher nonce)
	let xt1_bytes = uxt(Sr25519Keyring::Alice, 0).encode();
	let xt1 = to_hex(&xt1_bytes, true);
	let xt1_hash: H256 = api.call("author_submitExtrinsic", [xt1]).await.unwrap();
	let xt2 = to_hex(&uxt(Sr25519Keyring::Alice, 1).encode(), true);
	let xt2_hash: H256 = api.call("author_submitExtrinsic", [xt2]).await.unwrap();
	let xt3 = to_hex(&uxt(Sr25519Keyring::Bob, 0).encode(), true);
	let xt3_hash: H256 = api.call("author_submitExtrinsic", [xt3]).await.unwrap();
	assert_eq!(setup.pool.status().ready, 3);
	// Now remove all three.
	// Notice how we need an extra `Vec` wrapping the `Vec` we want to submit as params.
	let removed: Vec<H256> = api
		.call(
			METHOD,
			vec![vec![
				hash::ExtrinsicOrHash::Hash(xt3_hash),
				// Removing this one will also remove xt2
				hash::ExtrinsicOrHash::Extrinsic(xt1_bytes.into()),
			]],
		)
		.await
		.unwrap();
	assert_eq!(removed, vec![xt1_hash, xt2_hash, xt3_hash]);
}
// `author_insertKey` stores the public key in the keystore under the given key type.
#[tokio::test]
async fn author_should_insert_key() {
	let setup = TestSetup::default();
	let api = setup.to_rpc();
	let suri = "//Alice";
	let keypair = ed25519::Pair::from_string(suri, None).expect("generates keypair");
	// Params are (key type as string, seed URI, public key bytes).
	let params: (String, String, Bytes) = (
		String::from_utf8(ED25519.0.to_vec()).expect("Keytype is a valid string"),
		suri.to_string(),
		keypair.public().0.to_vec().into(),
	);
	api.call::<_, ()>("author_insertKey", params).await.unwrap();
	let pubkeys = setup.keystore.keys(ED25519).unwrap();
	assert!(pubkeys.contains(&keypair.public().to_raw_vec()));
}
// `author_rotateKeys` returns decodable session keys whose public parts are
// present in the node's keystore afterwards.
#[tokio::test]
async fn author_should_rotate_keys() {
	let setup = TestSetup::default();
	let api = setup.to_rpc();
	let new_pubkeys: Bytes = api.call("author_rotateKeys", EmptyParams::new()).await.unwrap();
	let session_keys =
		SessionKeys::decode(&mut &new_pubkeys[..]).expect("SessionKeys decode successfully");
	let ed25519_pubkeys = setup.keystore.keys(ED25519).unwrap();
	let sr25519_pubkeys = setup.keystore.keys(SR25519).unwrap();
	assert!(ed25519_pubkeys.contains(&session_keys.ed25519.to_raw_vec()));
	assert!(sr25519_pubkeys.contains(&session_keys.sr25519.to_raw_vec()));
}
// `author_hasSessionKeys`: true for keys in this node's keystore, false for keys
// from a different keystore, and an error for undecodable input.
#[tokio::test]
async fn author_has_session_keys() {
	let api = TestSetup::into_rpc();
	// Add a valid session key
	let pubkeys: Bytes = api
		.call("author_rotateKeys", EmptyParams::new())
		.await
		.expect("Rotates the keys");
	// Add a session key in a different keystore
	let non_existent_pubkeys: Bytes = {
		let api2 = TestSetup::into_rpc();
		api2.call("author_rotateKeys", EmptyParams::new())
			.await
			.expect("Rotates the keys")
	};
	// Then…
	let existing = api.call::<_, bool>("author_hasSessionKeys", vec![pubkeys]).await.unwrap();
	assert!(existing, "Existing key is in the session keys");
	let inexistent = api
		.call::<_, bool>("author_hasSessionKeys", vec![non_existent_pubkeys])
		.await
		.unwrap();
	assert_eq!(inexistent, false, "Inexistent key is not in the session keys");
	// Garbage input must surface a decode error, not a boolean.
	assert_matches!(
		api.call::<_, bool>("author_hasSessionKeys", vec![Bytes::from(vec![1, 2, 3])]).await,
		Err(RpcError::JsonRpc(err)) if err.message().contains("Session keys are not encoded correctly")
	);
}
// `author_hasKey` matches on both the public key bytes AND the key type:
// the inserted (key, type) pair is found, but the same key under a different
// type — or a different key under the same type — is not.
#[tokio::test]
async fn author_has_key() {
	let api = TestSetup::into_rpc();
	let suri = "//Alice";
	let alice_keypair = ed25519::Pair::from_string(suri, None).expect("Generates keypair");
	let params = (
		String::from_utf8(ED25519.0.to_vec()).expect("Keytype is a valid string"),
		suri.to_string(),
		Bytes::from(alice_keypair.public().0.to_vec()),
	);
	api.call::<_, ()>("author_insertKey", params).await.expect("insertKey works");
	let bob_keypair = ed25519::Pair::from_string("//Bob", None).expect("Generates keypair");
	// Alice's ED25519 key is there
	let has_alice_ed: bool = {
		let params = (
			Bytes::from(alice_keypair.public().to_raw_vec()),
			String::from_utf8(ED25519.0.to_vec()).expect("Keytype is a valid string"),
		);
		api.call("author_hasKey", params).await.unwrap()
	};
	assert!(has_alice_ed);
	// Alice's SR25519 key is not there
	let has_alice_sr: bool = {
		let params = (
			Bytes::from(alice_keypair.public().to_raw_vec()),
			String::from_utf8(SR25519.0.to_vec()).expect("Keytype is a valid string"),
		);
		api.call("author_hasKey", params).await.unwrap()
	};
	assert!(!has_alice_sr);
	// Bob's ED25519 key is not there
	let has_bob_ed: bool = {
		let params = (
			Bytes::from(bob_keypair.public().to_raw_vec()),
			String::from_utf8(ED25519.0.to_vec()).expect("Keytype is a valid string"),
		);
		api.call("author_hasKey", params).await.unwrap()
	};
	assert!(!has_bob_ed);
}
@@ -0,0 +1,149 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Blockchain API backend for full nodes.
use super::{client_err, ChainBackend, Error};
use crate::{
utils::{spawn_subscription_task, BoundedVecDeque, PendingSubscription},
SubscriptionTaskExecutor,
};
use std::{marker::PhantomData, sync::Arc};
use futures::{
future::{self},
stream::{self, Stream, StreamExt},
};
use jsonrpsee::{core::async_trait, PendingSubscriptionSink};
use sc_client_api::{BlockBackend, BlockchainEvents};
use sp_blockchain::HeaderBackend;
use sp_runtime::{generic::SignedBlock, traits::Block as BlockT};
/// Blockchain API backend for full nodes. Reads all the data from local database.
pub struct FullChain<Block: BlockT, Client> {
	/// Substrate client used for lookups and notification streams.
	client: Arc<Client>,
	/// Phantom member to pin the block type without storing one.
	_phantom: PhantomData<Block>,
	/// Subscription executor.
	executor: SubscriptionTaskExecutor,
}
impl<Block: BlockT, Client> FullChain<Block, Client> {
/// Create new Chain API RPC handler.
pub fn new(client: Arc<Client>, executor: SubscriptionTaskExecutor) -> Self {
Self { client, executor, _phantom: PhantomData }
}
}
#[async_trait]
impl<Block, Client> ChainBackend<Client, Block> for FullChain<Block, Client>
where
	Block: BlockT + 'static,
	Block::Header: Unpin,
	Client: BlockBackend<Block> + HeaderBackend<Block> + BlockchainEvents<Block> + 'static,
{
	fn client(&self) -> &Arc<Client> {
		&self.client
	}
	fn header(&self, hash: Option<Block::Hash>) -> Result<Option<Block::Header>, Error> {
		// `None` means "use the current best block".
		self.client.header(self.unwrap_or_best(hash)).map_err(client_err)
	}
	fn block(&self, hash: Option<Block::Hash>) -> Result<Option<SignedBlock<Block>>, Error> {
		self.client.block(self.unwrap_or_best(hash)).map_err(client_err)
	}
	fn subscribe_all_heads(&self, pending: PendingSubscriptionSink) {
		// Every imported block's header, whether or not it becomes the new best.
		subscribe_headers(
			&self.client,
			&self.executor,
			pending,
			|| self.client().info().best_hash,
			|| {
				self.client()
					.import_notification_stream()
					.map(|notification| notification.header)
			},
		)
	}
	fn subscribe_new_heads(&self, pending: PendingSubscriptionSink) {
		// Only headers of blocks that become the new best chain tip.
		subscribe_headers(
			&self.client,
			&self.executor,
			pending,
			|| self.client().info().best_hash,
			|| {
				self.client()
					.import_notification_stream()
					.filter(|notification| future::ready(notification.is_new_best))
					.map(|notification| notification.header)
			},
		)
	}
	fn subscribe_finalized_heads(&self, pending: PendingSubscriptionSink) {
		// Finalized headers only; the initial header sent is the finalized head.
		subscribe_headers(
			&self.client,
			&self.executor,
			pending,
			|| self.client().info().finalized_hash,
			|| {
				self.client()
					.finality_notification_stream()
					.map(|notification| notification.header)
			},
		)
	}
}
/// Subscribe to new headers.
///
/// Sends the header identified by `best_block_hash()` immediately, then pipes
/// every header yielded by `stream()` to the subscriber. The subscription task
/// runs on `executor` until the stream ends or the subscriber disconnects.
fn subscribe_headers<Block, Client, F, G, S>(
	client: &Arc<Client>,
	executor: &SubscriptionTaskExecutor,
	pending: PendingSubscriptionSink,
	best_block_hash: G,
	stream: F,
) where
	Block: BlockT + 'static,
	Block::Header: Unpin,
	Client: HeaderBackend<Block> + 'static,
	F: FnOnce() -> S,
	G: FnOnce() -> Block::Hash,
	S: Stream<Item = Block::Header> + Send + Unpin + 'static,
{
	// send current head right at the start.
	// A missing best header is only logged, not fatal: the subscription still
	// proceeds with the live stream.
	let maybe_header = client
		.header(best_block_hash())
		.map_err(client_err)
		.and_then(|header| header.ok_or_else(|| Error::Other("Best header missing.".into())))
		.map_err(|e| log::warn!("Best header error {:?}", e))
		.ok();
	// NOTE: by the time we set up the stream there might be a new best block and so there is a risk
	// that the stream has a hole in it. The alternative would be to look up the best block *after*
	// we set up the stream and chain it to the stream. Consuming code would need to handle
	// duplicates at the beginning of the stream though.
	let stream = stream::iter(maybe_header).chain(stream());
	spawn_subscription_task(
		executor,
		PendingSubscription::from(pending).pipe_from_stream(stream, BoundedVecDeque::default()),
	);
}
+176
View File
@@ -0,0 +1,176 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Substrate blockchain API.
mod chain_full;
#[cfg(test)]
mod tests;
use std::sync::Arc;
use crate::SubscriptionTaskExecutor;
use jsonrpsee::{core::async_trait, PendingSubscriptionSink};
use sc_client_api::BlockchainEvents;
use sp_rpc::{list::ListOrValue, number::NumberOrHex};
use sp_runtime::{
generic::SignedBlock,
traits::{Block as BlockT, NumberFor},
};
use self::error::Error;
use sc_client_api::BlockBackend;
pub use sc_rpc_api::chain::*;
use sp_blockchain::HeaderBackend;
/// Blockchain backend API: the operations a chain RPC backend must provide.
#[async_trait]
trait ChainBackend<Client, Block: BlockT>: Send + Sync + 'static
where
	Block: BlockT + 'static,
	Block::Header: Unpin,
	Client: HeaderBackend<Block> + BlockchainEvents<Block> + 'static,
{
	/// Get client reference.
	fn client(&self) -> &Arc<Client>;
	/// Tries to unwrap passed block hash, or uses best block hash otherwise.
	fn unwrap_or_best(&self, hash: Option<Block::Hash>) -> Block::Hash {
		match hash {
			None => self.client().info().best_hash,
			Some(hash) => hash,
		}
	}
	/// Get header of a block.
	fn header(&self, hash: Option<Block::Hash>) -> Result<Option<Block::Header>, Error>;
	/// Get header and body of a block.
	fn block(&self, hash: Option<Block::Hash>) -> Result<Option<SignedBlock<Block>>, Error>;
	/// Get hash of the n-th block in the canon chain.
	///
	/// By default returns latest block hash.
	fn block_hash(&self, number: Option<NumberOrHex>) -> Result<Option<Block::Hash>, Error> {
		match number {
			None => Ok(Some(self.client().info().best_hash)),
			Some(num_or_hex) => {
				// FIXME <2329>: Database seems to limit the block number to u32 for no reason
				let block_num: u32 = num_or_hex.try_into().map_err(|_| {
					Error::Other(format!(
						"`{:?}` > u32::MAX, the max block number is u32.",
						num_or_hex
					))
				})?;
				let block_num = <NumberFor<Block>>::from(block_num);
				self.client().hash(block_num).map_err(client_err)
			},
		}
	}
	/// Get hash of the last finalized block in the canon chain.
	fn finalized_head(&self) -> Result<Block::Hash, Error> {
		Ok(self.client().info().finalized_hash)
	}
	/// All new head subscription
	fn subscribe_all_heads(&self, pending: PendingSubscriptionSink);
	/// New best head subscription
	fn subscribe_new_heads(&self, pending: PendingSubscriptionSink);
	/// Finalized head subscription
	fn subscribe_finalized_heads(&self, pending: PendingSubscriptionSink);
}
/// Create a new Chain API that works on a full node.
pub fn new_full<Block: BlockT, Client>(
	client: Arc<Client>,
	executor: SubscriptionTaskExecutor,
) -> Chain<Block, Client>
where
	Block: BlockT + 'static,
	Block::Header: Unpin,
	Client: BlockBackend<Block> + HeaderBackend<Block> + BlockchainEvents<Block> + 'static,
{
	Chain { backend: Box::new(self::chain_full::FullChain::new(client, executor)) }
}
/// Chain API with subscriptions support.
pub struct Chain<Block: BlockT, Client> {
	// The concrete backend (currently only `FullChain`) behind a trait object.
	backend: Box<dyn ChainBackend<Client, Block>>,
}
#[async_trait]
impl<Block, Client> ChainApiServer<NumberFor<Block>, Block::Hash, Block::Header, SignedBlock<Block>>
	for Chain<Block, Client>
where
	Block: BlockT + 'static,
	Block::Header: Unpin,
	Client: HeaderBackend<Block> + BlockchainEvents<Block> + 'static,
{
	/// Header of the block with the given hash (best block if `None`).
	fn header(&self, hash: Option<Block::Hash>) -> Result<Option<Block::Header>, Error> {
		self.backend.header(hash)
	}
	/// Header and body of the block with the given hash (best block if `None`).
	fn block(&self, hash: Option<Block::Hash>) -> Result<Option<SignedBlock<Block>>, Error> {
		self.backend.block(hash)
	}
	/// Hash(es) of the block(s) at the given number(s); best block hash if `None`.
	/// The result mirrors the input shape: a single value or a list.
	fn block_hash(
		&self,
		number: Option<ListOrValue<NumberOrHex>>,
	) -> Result<ListOrValue<Option<Block::Hash>>, Error> {
		match number {
			None => self.backend.block_hash(None).map(ListOrValue::Value),
			Some(ListOrValue::Value(number)) =>
				self.backend.block_hash(Some(number)).map(ListOrValue::Value),
			// Look up each number in order, failing fast on the first backend error.
			Some(ListOrValue::List(list)) => Ok(ListOrValue::List(
				list.into_iter()
					.map(|number| self.backend.block_hash(Some(number)))
					.collect::<Result<_, _>>()?,
			)),
		}
	}
	/// Hash of the last finalized block.
	fn finalized_head(&self) -> Result<Block::Hash, Error> {
		self.backend.finalized_head()
	}
	/// Subscribe to all newly imported headers.
	fn subscribe_all_heads(&self, pending: PendingSubscriptionSink) {
		self.backend.subscribe_all_heads(pending);
	}
	/// Subscribe to new best-block headers.
	fn subscribe_new_heads(&self, pending: PendingSubscriptionSink) {
		self.backend.subscribe_new_heads(pending)
	}
	/// Subscribe to finalized-block headers.
	fn subscribe_finalized_heads(&self, pending: PendingSubscriptionSink) {
		self.backend.subscribe_finalized_heads(pending)
	}
}
fn client_err(err: sp_blockchain::Error) -> Error {
Error::Client(Box::new(err))
}
+275
View File
@@ -0,0 +1,275 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use super::*;
use crate::testing::{test_executor, timeout_secs};
use assert_matches::assert_matches;
use jsonrpsee::core::EmptyServerParams as EmptyParams;
use sc_block_builder::BlockBuilderBuilder;
use sp_consensus::BlockOrigin;
use sp_rpc::list::ListOrValue;
use substrate_test_runtime_client::{
prelude::*,
runtime::{Block, Header, H256},
};
// `chain_getHeader` returns the genesis header both by explicit hash and with
// no params (defaults to best), and `None` for an unknown hash.
#[tokio::test]
async fn should_return_header() {
	let client = Arc::new(substrate_test_runtime_client::new());
	let api = new_full(client.clone(), test_executor()).into_rpc();
	let res: Header =
		api.call("chain_getHeader", [H256::from(client.genesis_hash())]).await.unwrap();
	assert_eq!(
		res,
		Header {
			parent_hash: H256::from_low_u64_be(0),
			number: 0,
			state_root: res.state_root,
			extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314"
				.parse()
				.unwrap(),
			digest: Default::default(),
		}
	);
	// No parameter: defaults to the best block (genesis here).
	let res: Header = api.call("chain_getHeader", EmptyParams::new()).await.unwrap();
	assert_eq!(
		res,
		Header {
			parent_hash: H256::from_low_u64_be(0),
			number: 0,
			state_root: res.state_root,
			extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314"
				.parse()
				.unwrap(),
			digest: Default::default(),
		}
	);
	// Unknown hash yields `None` rather than an error.
	assert_matches!(
		api.call::<_, Option<Header>>("chain_getHeader", [H256::from_low_u64_be(5)])
			.await
			.unwrap(),
		None
	);
}
// `chain_getBlock` returns blocks by hash and by default (best block), and
// `None` for an unknown hash.
#[tokio::test]
async fn should_return_a_block() {
	let client = Arc::new(substrate_test_runtime_client::new());
	let api = new_full(client.clone(), test_executor()).into_rpc();
	let block = BlockBuilderBuilder::new(&*client)
		.on_parent_block(client.chain_info().best_hash)
		.with_parent_block_number(client.chain_info().best_number)
		.build()
		.unwrap()
		.build()
		.unwrap()
		.block;
	let block_hash = block.hash();
	client.import(BlockOrigin::Own, block).await.unwrap();
	let res: SignedBlock<Block> =
		api.call("chain_getBlock", [H256::from(client.genesis_hash())]).await.unwrap();
	// Genesis block is not justified
	assert!(res.justifications.is_none());
	let res: SignedBlock<Block> =
		api.call("chain_getBlock", [H256::from(block_hash)]).await.unwrap();
	assert_eq!(
		res.block,
		Block {
			header: Header {
				parent_hash: client.genesis_hash(),
				number: 1,
				state_root: res.block.header.state_root,
				extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314"
					.parse()
					.unwrap(),
				digest: Default::default(),
			},
			extrinsics: vec![],
		}
	);
	// No parameter: defaults to the best block (the one just imported).
	let res: SignedBlock<Block> = api.call("chain_getBlock", Vec::<H256>::new()).await.unwrap();
	assert_eq!(
		res.block,
		Block {
			header: Header {
				parent_hash: client.genesis_hash(),
				number: 1,
				state_root: res.block.header.state_root,
				extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314"
					.parse()
					.unwrap(),
				digest: Default::default(),
			},
			extrinsics: vec![],
		}
	);
	// Unknown hash yields `None`.
	assert_matches!(
		api.call::<_, Option<Header>>("chain_getBlock", [H256::from_low_u64_be(5)])
			.await
			.unwrap(),
		None
	);
}
// `chain_getBlockHash` resolves a single number, a U256 number, and a list of
// numbers; unknown heights map to `None` inside the result.
#[tokio::test]
async fn should_return_block_hash() {
	let client = Arc::new(substrate_test_runtime_client::new());
	let api = new_full(client.clone(), test_executor()).into_rpc();
	// No parameter: best block hash (genesis before any import).
	let res: ListOrValue<Option<H256>> =
		api.call("chain_getBlockHash", EmptyParams::new()).await.unwrap();
	assert_matches!(
		res,
		ListOrValue::Value(Some(ref x)) if x == &client.genesis_hash()
	);
	let res: ListOrValue<Option<H256>> =
		api.call("chain_getBlockHash", [ListOrValue::from(0_u64)]).await.unwrap();
	assert_matches!(
		res,
		ListOrValue::Value(Some(ref x)) if x == &client.genesis_hash()
	);
	// Height 1 does not exist yet.
	let res: Option<ListOrValue<Option<H256>>> =
		api.call("chain_getBlockHash", [ListOrValue::from(1_u64)]).await.unwrap();
	assert_matches!(res, None);
	let block = BlockBuilderBuilder::new(&*client)
		.on_parent_block(client.chain_info().best_hash)
		.with_parent_block_number(client.chain_info().best_number)
		.build()
		.unwrap()
		.build()
		.unwrap()
		.block;
	client.import(BlockOrigin::Own, block.clone()).await.unwrap();
	let res: ListOrValue<Option<H256>> =
		api.call("chain_getBlockHash", [ListOrValue::from(0_u64)]).await.unwrap();
	assert_matches!(
		res,
		ListOrValue::Value(Some(ref x)) if x == &client.genesis_hash()
	);
	let res: ListOrValue<Option<H256>> =
		api.call("chain_getBlockHash", [ListOrValue::from(1_u64)]).await.unwrap();
	assert_matches!(
		res,
		ListOrValue::Value(Some(ref x)) if x == &block.hash()
	);
	// Numbers may also be supplied as hex (U256).
	let res: ListOrValue<Option<H256>> = api
		.call("chain_getBlockHash", [ListOrValue::Value(sp_core::U256::from(1_u64))])
		.await
		.unwrap();
	assert_matches!(
		res,
		ListOrValue::Value(Some(ref x)) if x == &block.hash()
	);
	// A list input returns a list, with `None` for heights that don't exist.
	let res: ListOrValue<Option<H256>> = api
		.call("chain_getBlockHash", [ListOrValue::List(vec![0_u64, 1_u64, 2_u64])])
		.await
		.unwrap();
	assert_matches!(
		res,
		ListOrValue::List(list) if list == &[client.genesis_hash().into(), block.hash().into(), None]
	);
}
// `chain_getFinalizedHead` tracks finalization: it stays at genesis after a mere
// import and only advances once the block is explicitly finalized.
#[tokio::test]
async fn should_return_finalized_hash() {
	let client = Arc::new(substrate_test_runtime_client::new());
	let api = new_full(client.clone(), test_executor()).into_rpc();
	let res: H256 = api.call("chain_getFinalizedHead", EmptyParams::new()).await.unwrap();
	assert_eq!(res, client.genesis_hash());
	// import new block
	let block = BlockBuilderBuilder::new(&*client)
		.on_parent_block(client.chain_info().best_hash)
		.with_parent_block_number(client.chain_info().best_number)
		.build()
		.unwrap()
		.build()
		.unwrap()
		.block;
	let block_hash = block.hash();
	client.import(BlockOrigin::Own, block).await.unwrap();
	// no finalization yet
	let res: H256 = api.call("chain_getFinalizedHead", EmptyParams::new()).await.unwrap();
	assert_eq!(res, client.genesis_hash());
	// finalize
	client.finalize_block(block_hash, None).unwrap();
	let res: H256 = api.call("chain_getFinalizedHead", EmptyParams::new()).await.unwrap();
	assert_eq!(res, block_hash);
}
// Exercises the all-heads subscription via the shared driver below.
#[tokio::test]
async fn should_notify_about_latest_block() {
	test_head_subscription("chain_subscribeAllHeads").await;
}
// Exercises the new/best-heads subscription via the shared driver below.
#[tokio::test]
async fn should_notify_about_best_block() {
	test_head_subscription("chain_subscribeNewHeads").await;
}
// Exercises the finalized-heads subscription via the shared driver below.
#[tokio::test]
async fn should_notify_about_finalized_block() {
	test_head_subscription("chain_subscribeFinalizedHeads").await;
}
// Shared driver for the three head-subscription tests above: subscribes via
// `method`, then imports AND finalizes one block so that every subscription
// kind (all / new / finalized heads) has events to deliver.
async fn test_head_subscription(method: &str) {
	let client = Arc::new(substrate_test_runtime_client::new());
	let mut sub = {
		let api = new_full(client.clone(), test_executor()).into_rpc();
		let sub = api.subscribe_unbounded(method, EmptyParams::new()).await.unwrap();
		let block = BlockBuilderBuilder::new(&*client)
			.on_parent_block(client.chain_info().best_hash)
			.with_parent_block_number(client.chain_info().best_number)
			.build()
			.unwrap()
			.build()
			.unwrap()
			.block;
		let block_hash = block.hash();
		client.import(BlockOrigin::Own, block).await.unwrap();
		client.finalize_block(block_hash, None).unwrap();
		sub
	};
	// Two header notifications are expected — presumably the head present at
	// subscription time plus the newly imported/finalized block (exact
	// semantics depend on the subscription kind; confirm against the chain
	// RPC implementation). After `close` the stream must end.
	assert_matches!(timeout_secs(10, sub.next::<Header>()).await, Ok(Some(_)));
	assert_matches!(timeout_secs(10, sub.next::<Header>()).await, Ok(Some(_)));
	sub.close();
	assert_matches!(timeout_secs(10, sub.next::<Header>()).await, Ok(None));
}
+116
View File
@@ -0,0 +1,116 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Implementation of the [`DevApiServer`] trait providing debug utilities for Substrate based
//! blockchains.
#[cfg(test)]
mod tests;
use jsonrpsee::Extensions;
use sc_client_api::{BlockBackend, HeaderBackend};
use sc_rpc_api::{check_if_safe, dev::error::Error};
use sp_api::{ApiExt, Core, ProvideRuntimeApi};
use sp_core::Encode;
use sp_runtime::{
generic::DigestItem,
traits::{Block as BlockT, Header},
};
use std::{
marker::{PhantomData, Send, Sync},
sync::Arc,
};
pub use sc_rpc_api::dev::{BlockStats, DevApiServer};
/// Shorthand for the hashing algorithm declared by `Block`'s header type.
type HasherOf<Block> = <<Block as BlockT>::Header as Header>::Hashing;
/// The Dev API. All methods are unsafe.
pub struct Dev<Block: BlockT, Client> {
	/// Client handle used to fetch blocks/headers and to re-execute blocks.
	client: Arc<Client>,
	/// `Block` only appears in the trait impl, so it is carried as phantom data.
	_phantom: PhantomData<Block>,
}
impl<Block: BlockT, Client> Dev<Block, Client> {
	/// Create a new Dev API.
	pub fn new(client: Arc<Client>) -> Self {
		// `PhantomData` is a unit struct: construct it directly instead of
		// `PhantomData::default()` (clippy: `default_constructed_unit_structs`).
		Self { client, _phantom: PhantomData }
	}
}
impl<Block, Client> DevApiServer<Block::Hash> for Dev<Block, Client>
where
	Block: BlockT + 'static,
	Client: BlockBackend<Block>
		+ HeaderBackend<Block>
		+ ProvideRuntimeApi<Block>
		+ Send
		+ Sync
		+ 'static,
	Client::Api: Core<Block>,
{
	// Re-executes the block `hash` on top of its parent's state and reports
	// encoded-size / storage-proof statistics. Returns `Ok(None)` when either
	// the block or its parent header is unknown to the client.
	fn block_stats(
		&self,
		ext: &Extensions,
		hash: Block::Hash,
	) -> Result<Option<BlockStats>, Error> {
		// Unsafe-RPC gate: this endpoint re-executes arbitrary blocks.
		check_if_safe(ext)?;
		let block = {
			let block = self.client.block(hash).map_err(|e| Error::BlockQueryError(Box::new(e)))?;
			if let Some(block) = block {
				let (mut header, body) = block.block.deconstruct();
				// Remove the `Seal` to ensure we have the number of digests as expected by the
				// runtime.
				header.digest_mut().logs.retain(|item| !matches!(item, DigestItem::Seal(_, _)));
				Block::new(header, body)
			} else {
				return Ok(None);
			}
		};
		let parent_header = {
			let parent_hash = *block.header().parent_hash();
			let parent_header = self
				.client
				.header(parent_hash)
				.map_err(|e| Error::BlockQueryError(Box::new(e)))?;
			if let Some(header) = parent_header {
				header
			} else {
				return Ok(None);
			}
		};
		let block_len = block.encoded_size() as u64;
		let num_extrinsics = block.extrinsics().len() as u64;
		// The parent's state root is the pre-state root the block executes
		// against; it is needed below to compact the recorded proof.
		let pre_root = *parent_header.state_root();
		let mut runtime_api = self.client.runtime_api();
		// Enable proof recording so the witness size can be measured.
		runtime_api.record_proof();
		runtime_api
			.execute_block(parent_header.hash(), block.into())
			.map_err(|_| Error::BlockExecutionFailed)?;
		let witness = runtime_api
			.extract_proof()
			.expect("We enabled proof recording. A proof must be available; qed");
		let witness_len = witness.encoded_size() as u64;
		let witness_compact_len = witness
			.into_compact_proof::<HasherOf<Block>>(pre_root)
			.map_err(|_| Error::WitnessCompactionFailed)?
			.encoded_size() as u64;
		Ok(Some(BlockStats { witness_len, witness_compact_len, block_len, num_extrinsics }))
	}
}
+109
View File
@@ -0,0 +1,109 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use super::*;
use crate::DenyUnsafe;
use sc_block_builder::BlockBuilderBuilder;
use sp_blockchain::HeaderBackend;
use sp_consensus::BlockOrigin;
use substrate_test_runtime_client::{prelude::*, runtime::Block};
// `dev_getBlockStats` must match statistics computed independently with the
// same proof-recording execution, and return `None` for the genesis block.
#[tokio::test]
async fn block_stats_work() {
	let client = Arc::new(substrate_test_runtime_client::new());
	let mut api = <Dev<Block, _>>::new(client.clone()).into_rpc();
	api.extensions_mut().insert(DenyUnsafe::No);
	let block = BlockBuilderBuilder::new(&*client)
		.on_parent_block(client.chain_info().genesis_hash)
		.with_parent_block_number(0)
		.build()
		.unwrap()
		.build()
		.unwrap()
		.block;
	// Compute the expected sizes the same way `Dev::block_stats` does:
	// execute the block against its parent with proof recording enabled.
	let (expected_witness_len, expected_witness_compact_len, expected_block_len) = {
		let genesis_hash = client.chain_info().genesis_hash;
		let mut runtime_api = client.runtime_api();
		runtime_api.record_proof();
		runtime_api.execute_block(genesis_hash, block.clone().into()).unwrap();
		let witness = runtime_api.extract_proof().unwrap();
		let pre_root = *client.header(genesis_hash).unwrap().unwrap().state_root();
		(
			witness.clone().encoded_size() as u64,
			witness.into_compact_proof::<HasherOf<Block>>(pre_root).unwrap().encoded_size() as u64,
			block.encoded_size() as u64,
		)
	};
	client.import(BlockOrigin::Own, block).await.unwrap();
	// Can't gather stats for a block without a parent.
	assert_eq!(
		api.call::<_, Option<BlockStats>>("dev_getBlockStats", [client.genesis_hash()])
			.await
			.unwrap(),
		None
	);
	assert_eq!(
		api.call::<_, Option<BlockStats>>("dev_getBlockStats", [client.info().best_hash])
			.await
			.unwrap(),
		Some(BlockStats {
			witness_len: expected_witness_len,
			witness_compact_len: expected_witness_compact_len,
			block_len: expected_block_len,
			num_extrinsics: 0,
		}),
	);
}
// With `DenyUnsafe::Yes` installed, `dev_getBlockStats` must be rejected at
// the RPC layer with the standard "unsafe" error object.
#[tokio::test]
async fn deny_unsafe_works() {
	let client = Arc::new(substrate_test_runtime_client::new());
	let mut rpc = <Dev<Block, _>>::new(client.clone()).into_rpc();
	rpc.extensions_mut().insert(DenyUnsafe::Yes);
	let block_to_import = BlockBuilderBuilder::new(&*client)
		.on_parent_block(client.chain_info().genesis_hash)
		.with_parent_block_number(0)
		.build()
		.unwrap()
		.build()
		.unwrap()
		.block;
	client.import(BlockOrigin::Own, block_to_import).await.unwrap();
	// Build the JSON-RPC request by hand so the transport-level error object
	// can be inspected verbatim.
	let best_hash = client.info().best_hash;
	let best_hash_param =
		serde_json::to_string(&best_hash).expect("To string must always succeed for block hashes");
	let raw_request = format!(
		r#"{{"jsonrpc":"2.0","method":"dev_getBlockStats","params":[{}],"id":1}}"#,
		best_hash_param
	);
	let (response, _) =
		rpc.raw_json_request(&raw_request, 1).await.expect("Raw calls should succeed");
	assert_eq!(
		response,
		r#"{"jsonrpc":"2.0","id":1,"error":{"code":-32601,"message":"RPC call is unsafe to be called externally"}}"#
	);
}
+45
View File
@@ -0,0 +1,45 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Substrate RPC implementation.
//!
//! A core implementation of Substrate RPC interfaces.
#![warn(missing_docs)]
pub use jsonrpsee::core::id_providers::{
RandomIntegerIdProvider as RandomIntegerSubscriptionId,
RandomStringIdProvider as RandomStringSubscriptionId,
};
pub use sc_rpc_api::DenyUnsafe;
pub mod author;
pub mod chain;
pub mod dev;
pub mod mixnet;
pub mod offchain;
pub mod state;
pub mod statement;
pub mod system;
pub mod utils;
#[cfg(any(test, feature = "test-helpers"))]
pub mod testing;
/// Task executor that is being used by RPC subscriptions.
pub type SubscriptionTaskExecutor = std::sync::Arc<dyn sp_core::traits::SpawnNamed>;
+47
View File
@@ -0,0 +1,47 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Substrate mixnet API.
use jsonrpsee::core::async_trait;
use sc_mixnet::Api;
use sc_rpc_api::mixnet::error::Error;
pub use sc_rpc_api::mixnet::MixnetApiServer;
use sp_core::Bytes;
/// Mixnet API.
///
/// Wraps the [`sc_mixnet::Api`] handle in an async mutex so RPC methods,
/// which only get `&self`, can obtain mutable access to it.
pub struct Mixnet(futures::lock::Mutex<Api>);
impl Mixnet {
/// Create a new mixnet API instance.
pub fn new(api: Api) -> Self {
Self(futures::lock::Mutex::new(api))
}
}
#[async_trait]
impl MixnetApiServer for Mixnet {
async fn submit_extrinsic(&self, extrinsic: Bytes) -> Result<(), Error> {
// We only hold the lock while pushing the request into the requests channel
let fut = {
let mut api = self.0.lock().await;
api.submit_extrinsic(extrinsic).await
};
Ok(fut.await.map_err(Error)?)
}
}
+101
View File
@@ -0,0 +1,101 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Substrate offchain API.
#[cfg(test)]
mod tests;
use self::error::Error;
use jsonrpsee::{core::async_trait, Extensions};
use parking_lot::RwLock;
use sc_rpc_api::check_if_safe;
/// Re-export the API for backward compatibility.
pub use sc_rpc_api::offchain::*;
use sp_core::{
offchain::{OffchainStorage, StorageKind},
Bytes,
};
use std::sync::Arc;
/// Offchain API
#[derive(Debug)]
pub struct Offchain<T: OffchainStorage> {
	/// Offchain storage, behind an `RwLock` so concurrent RPC calls can read
	/// in parallel while writes (`set`/`remove`) take exclusive access.
	storage: Arc<RwLock<T>>,
}
impl<T: OffchainStorage> Offchain<T> {
	/// Builds a new offchain RPC API around the given storage backend.
	pub fn new(storage: T) -> Self {
		let shared = RwLock::new(storage);
		Self { storage: Arc::new(shared) }
	}
}
#[async_trait]
impl<T: OffchainStorage + 'static> OffchainApiServer for Offchain<T> {
fn set_local_storage(
&self,
ext: &Extensions,
kind: StorageKind,
key: Bytes,
value: Bytes,
) -> Result<(), Error> {
check_if_safe(ext)?;
let prefix = match kind {
StorageKind::PERSISTENT => sp_offchain::STORAGE_PREFIX,
StorageKind::LOCAL => return Err(Error::UnavailableStorageKind),
};
self.storage.write().set(prefix, &key, &value);
Ok(())
}
fn clear_local_storage(
&self,
ext: &Extensions,
kind: StorageKind,
key: Bytes,
) -> Result<(), Error> {
check_if_safe(ext)?;
let prefix = match kind {
StorageKind::PERSISTENT => sp_offchain::STORAGE_PREFIX,
StorageKind::LOCAL => return Err(Error::UnavailableStorageKind),
};
self.storage.write().remove(prefix, &key);
Ok(())
}
fn get_local_storage(
&self,
ext: &Extensions,
kind: StorageKind,
key: Bytes,
) -> Result<Option<Bytes>, Error> {
check_if_safe(ext)?;
let prefix = match kind {
StorageKind::PERSISTENT => sp_offchain::STORAGE_PREFIX,
StorageKind::LOCAL => return Err(Error::UnavailableStorageKind),
};
Ok(self.storage.read().get(prefix, &key).map(Into::into))
}
}
@@ -0,0 +1,75 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use super::*;
use crate::testing::{allow_unsafe, deny_unsafe};
use assert_matches::assert_matches;
use sp_core::{offchain::storage::InMemOffchainStorage, Bytes};
// Round-trips a value through persistent offchain storage:
// set → get → clear → get.
#[test]
fn local_storage_should_work() {
	let api = Offchain::new(InMemOffchainStorage::default());
	let ext = allow_unsafe();
	let storage_key = Bytes(b"offchain_storage".to_vec());
	let stored_value = Bytes(b"offchain_value".to_vec());
	assert_matches!(
		api.set_local_storage(
			&ext,
			StorageKind::PERSISTENT,
			storage_key.clone(),
			stored_value.clone()
		),
		Ok(())
	);
	assert_matches!(
		api.get_local_storage(&ext, StorageKind::PERSISTENT, storage_key.clone()),
		Ok(Some(ref v)) if *v == stored_value
	);
	assert_matches!(
		api.clear_local_storage(&ext, StorageKind::PERSISTENT, storage_key.clone()),
		Ok(())
	);
	assert_matches!(api.get_local_storage(&ext, StorageKind::PERSISTENT, storage_key), Ok(None));
}
// Every offchain-storage RPC must be rejected when unsafe calls are denied.
#[test]
fn offchain_calls_considered_unsafe() {
	let api = Offchain::new(InMemOffchainStorage::default());
	let ext = deny_unsafe();
	let storage_key = Bytes(b"offchain_storage".to_vec());
	let stored_value = Bytes(b"offchain_value".to_vec());
	let denied = "RPC call is unsafe to be called externally";
	assert_matches!(
		api.set_local_storage(
			&ext,
			StorageKind::PERSISTENT,
			storage_key.clone(),
			stored_value.clone()
		),
		Err(Error::UnsafeRpcCalled(e)) => {
			assert_eq!(e.to_string(), denied)
		}
	);
	assert_matches!(
		api.clear_local_storage(&ext, StorageKind::PERSISTENT, storage_key.clone()),
		Err(Error::UnsafeRpcCalled(e)) => {
			assert_eq!(e.to_string(), denied)
		}
	);
	assert_matches!(
		api.get_local_storage(&ext, StorageKind::PERSISTENT, storage_key),
		Err(Error::UnsafeRpcCalled(e)) => {
			assert_eq!(e.to_string(), denied)
		}
	);
}
+502
View File
@@ -0,0 +1,502 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Substrate state API.
mod state_full;
mod utils;
#[cfg(test)]
mod tests;
use crate::SubscriptionTaskExecutor;
use jsonrpsee::{core::async_trait, Extensions, PendingSubscriptionSink};
use sc_client_api::{
Backend, BlockBackend, BlockchainEvents, ExecutorProvider, ProofProvider, StorageProvider,
};
use sc_rpc_api::{check_if_safe, DenyUnsafe};
use sc_tracing::block::TracingExecuteBlock;
use sp_api::{CallApiAt, Metadata, ProvideRuntimeApi};
use sp_blockchain::{HeaderBackend, HeaderMetadata};
use sp_core::{
storage::{PrefixedStorageKey, StorageChangeSet, StorageData, StorageKey},
Bytes,
};
use sp_runtime::traits::Block as BlockT;
use sp_version::RuntimeVersion;
use std::sync::Arc;
pub use sc_rpc_api::{child_state::*, state::*};
/// Hard limit on the `count` parameter of the paged key queries; larger
/// requests are rejected with `Error::InvalidCount` (see `storage_keys_paged`).
const STORAGE_KEYS_PAGED_MAX_COUNT: u32 = 1000;
/// State backend API.
///
/// Boxed behind `dyn` by [`State`]; the full-node implementation is
/// `state_full::FullState`. Wherever a method takes `block:
/// Option<Block::Hash>`, `None` selects the best block (see
/// `FullState::block_or_best`).
#[async_trait]
pub trait StateBackend<Block: BlockT, Client>: Send + Sync + 'static
where
	Block: BlockT + 'static,
	Client: Send + Sync + 'static,
{
	/// Call runtime method at given block.
	fn call(
		&self,
		block: Option<Block::Hash>,
		method: String,
		call_data: Bytes,
	) -> Result<Bytes, Error>;
	/// Returns the keys with prefix, leave empty to get all the keys.
	fn storage_keys(
		&self,
		block: Option<Block::Hash>,
		prefix: StorageKey,
	) -> Result<Vec<StorageKey>, Error>;
	/// Returns the keys with prefix along with their values, leave empty to get all the pairs.
	fn storage_pairs(
		&self,
		block: Option<Block::Hash>,
		prefix: StorageKey,
	) -> Result<Vec<(StorageKey, StorageData)>, Error>;
	/// Returns the keys with prefix with pagination support.
	fn storage_keys_paged(
		&self,
		block: Option<Block::Hash>,
		prefix: Option<StorageKey>,
		count: u32,
		start_key: Option<StorageKey>,
	) -> Result<Vec<StorageKey>, Error>;
	/// Returns a storage entry at a specific block's state.
	fn storage(
		&self,
		block: Option<Block::Hash>,
		key: StorageKey,
	) -> Result<Option<StorageData>, Error>;
	/// Returns the hash of a storage entry at a block's state.
	fn storage_hash(
		&self,
		block: Option<Block::Hash>,
		key: StorageKey,
	) -> Result<Option<Block::Hash>, Error>;
	/// Returns the size of a storage entry at a block's state.
	///
	/// If data is available at `key`, it is returned. Else, the sum of values who's key has `key`
	/// prefix is returned, i.e. all the storage (double) maps that have this prefix.
	async fn storage_size(
		&self,
		block: Option<Block::Hash>,
		key: StorageKey,
		deny_unsafe: DenyUnsafe,
	) -> Result<Option<u64>, Error>;
	/// Returns the runtime metadata as an opaque blob.
	fn metadata(&self, block: Option<Block::Hash>) -> Result<Bytes, Error>;
	/// Get the runtime version.
	fn runtime_version(&self, block: Option<Block::Hash>) -> Result<RuntimeVersion, Error>;
	/// Query historical storage entries (by key) starting from a block given as the second
	/// parameter.
	///
	/// NOTE This first returned result contains the initial state of storage for all keys.
	/// Subsequent values in the vector represent changes to the previous state (diffs).
	fn query_storage(
		&self,
		from: Block::Hash,
		to: Option<Block::Hash>,
		keys: Vec<StorageKey>,
	) -> Result<Vec<StorageChangeSet<Block::Hash>>, Error>;
	/// Query storage entries (by key) starting at block hash given as the second parameter.
	fn query_storage_at(
		&self,
		keys: Vec<StorageKey>,
		at: Option<Block::Hash>,
	) -> Result<Vec<StorageChangeSet<Block::Hash>>, Error>;
	/// Returns proof of storage entries at a specific block's state.
	fn read_proof(
		&self,
		block: Option<Block::Hash>,
		keys: Vec<StorageKey>,
	) -> Result<ReadProof<Block::Hash>, Error>;
	/// Trace storage changes for block
	fn trace_block(
		&self,
		block: Block::Hash,
		targets: Option<String>,
		storage_keys: Option<String>,
		methods: Option<String>,
	) -> Result<sp_rpc::tracing::TraceBlockResponse, Error>;
	/// New runtime version subscription
	fn subscribe_runtime_version(&self, pending: PendingSubscriptionSink);
	/// New storage subscription
	fn subscribe_storage(
		&self,
		pending: PendingSubscriptionSink,
		keys: Option<Vec<StorageKey>>,
		deny_unsafe: DenyUnsafe,
	);
}
/// Create new state API that works on full node.
///
/// Returns the pair of RPC handlers (`State`, `ChildState`), each backed by
/// its own `FullState` instance over the same client.
pub fn new_full<BE, Block: BlockT, Client>(
	client: Arc<Client>,
	executor: SubscriptionTaskExecutor,
	execute_block: Option<Arc<dyn TracingExecuteBlock<Block>>>,
) -> (State<Block, Client>, ChildState<Block, Client>)
where
	Block: BlockT + 'static,
	Block::Hash: Unpin,
	BE: Backend<Block> + 'static,
	Client: ExecutorProvider<Block>
		+ StorageProvider<Block, BE>
		+ ProofProvider<Block>
		+ HeaderMetadata<Block, Error = sp_blockchain::Error>
		+ BlockchainEvents<Block>
		+ CallApiAt<Block>
		+ HeaderBackend<Block>
		+ BlockBackend<Block>
		+ ProvideRuntimeApi<Block>
		+ Send
		+ Sync
		+ 'static,
	Client::Api: Metadata<Block>,
{
	let child_backend = Box::new(self::state_full::FullState::new(
		client.clone(),
		executor.clone(),
		execute_block.clone(),
	));
	// Last use of the arguments: move them instead of cloning a second time
	// (clippy: `redundant_clone`).
	let backend = Box::new(self::state_full::FullState::new(client, executor, execute_block));
	(State { backend }, ChildState { backend: child_backend })
}
/// State API with subscriptions support.
pub struct State<Block, Client> {
	/// Boxed backend every RPC method delegates to (constructed by [`new_full`]).
	backend: Box<dyn StateBackend<Block, Client>>,
}
#[async_trait]
impl<Block, Client> StateApiServer<Block::Hash> for State<Block, Client>
where
Block: BlockT + 'static,
Client: Send + Sync + 'static,
{
fn call(
&self,
method: String,
data: Bytes,
block: Option<Block::Hash>,
) -> Result<Bytes, Error> {
self.backend.call(block, method, data).map_err(Into::into)
}
fn storage_keys(
&self,
key_prefix: StorageKey,
block: Option<Block::Hash>,
) -> Result<Vec<StorageKey>, Error> {
self.backend.storage_keys(block, key_prefix).map_err(Into::into)
}
fn storage_pairs(
&self,
ext: &Extensions,
key_prefix: StorageKey,
block: Option<Block::Hash>,
) -> Result<Vec<(StorageKey, StorageData)>, Error> {
check_if_safe(ext)?;
self.backend.storage_pairs(block, key_prefix).map_err(Into::into)
}
fn storage_keys_paged(
&self,
prefix: Option<StorageKey>,
count: u32,
start_key: Option<StorageKey>,
block: Option<Block::Hash>,
) -> Result<Vec<StorageKey>, Error> {
if count > STORAGE_KEYS_PAGED_MAX_COUNT {
return Err(Error::InvalidCount { value: count, max: STORAGE_KEYS_PAGED_MAX_COUNT });
}
self.backend
.storage_keys_paged(block, prefix, count, start_key)
.map_err(Into::into)
}
fn storage(
&self,
key: StorageKey,
block: Option<Block::Hash>,
) -> Result<Option<StorageData>, Error> {
self.backend.storage(block, key).map_err(Into::into)
}
fn storage_hash(
&self,
key: StorageKey,
block: Option<Block::Hash>,
) -> Result<Option<Block::Hash>, Error> {
self.backend.storage_hash(block, key).map_err(Into::into)
}
async fn storage_size(
&self,
ext: &Extensions,
key: StorageKey,
block: Option<Block::Hash>,
) -> Result<Option<u64>, Error> {
let deny_unsafe = ext
.get::<DenyUnsafe>()
.cloned()
.expect("DenyUnsafe extension is always set by the substrate rpc server; qed");
self.backend.storage_size(block, key, deny_unsafe).await.map_err(Into::into)
}
fn metadata(&self, block: Option<Block::Hash>) -> Result<Bytes, Error> {
self.backend.metadata(block).map_err(Into::into)
}
fn runtime_version(&self, at: Option<Block::Hash>) -> Result<RuntimeVersion, Error> {
self.backend.runtime_version(at).map_err(Into::into)
}
fn query_storage(
&self,
ext: &Extensions,
keys: Vec<StorageKey>,
from: Block::Hash,
to: Option<Block::Hash>,
) -> Result<Vec<StorageChangeSet<Block::Hash>>, Error> {
check_if_safe(ext)?;
self.backend.query_storage(from, to, keys).map_err(Into::into)
}
fn query_storage_at(
&self,
keys: Vec<StorageKey>,
at: Option<Block::Hash>,
) -> Result<Vec<StorageChangeSet<Block::Hash>>, Error> {
self.backend.query_storage_at(keys, at).map_err(Into::into)
}
fn read_proof(
&self,
keys: Vec<StorageKey>,
block: Option<Block::Hash>,
) -> Result<ReadProof<Block::Hash>, Error> {
self.backend.read_proof(block, keys).map_err(Into::into)
}
/// Re-execute the given block with the tracing targets given in `targets`
/// and capture all state changes.
///
/// Note: requires the node to run with `--rpc-methods=Unsafe`.
/// Note: requires runtimes compiled with wasm tracing support, `--features with-tracing`.
fn trace_block(
&self,
ext: &Extensions,
block: Block::Hash,
targets: Option<String>,
storage_keys: Option<String>,
methods: Option<String>,
) -> Result<sp_rpc::tracing::TraceBlockResponse, Error> {
check_if_safe(ext)?;
self.backend
.trace_block(block, targets, storage_keys, methods)
.map_err(Into::into)
}
fn subscribe_runtime_version(&self, pending: PendingSubscriptionSink) {
self.backend.subscribe_runtime_version(pending)
}
fn subscribe_storage(
&self,
pending: PendingSubscriptionSink,
ext: &Extensions,
keys: Option<Vec<StorageKey>>,
) {
let deny_unsafe = ext
.get::<DenyUnsafe>()
.cloned()
.expect("DenyUnsafe extension is always set by the substrate rpc server; qed");
self.backend.subscribe_storage(pending, keys, deny_unsafe)
}
}
/// Child state backend API.
///
/// Boxed behind `dyn` by [`ChildState`]; a `None` block hash selects the best
/// block (see `FullState::block_or_best`).
pub trait ChildStateBackend<Block: BlockT, Client>: Send + Sync + 'static
where
	Block: BlockT + 'static,
	Client: Send + Sync + 'static,
{
	/// Returns proof of storage for a child key entries at a specific block's state.
	fn read_child_proof(
		&self,
		block: Option<Block::Hash>,
		storage_key: PrefixedStorageKey,
		keys: Vec<StorageKey>,
	) -> Result<ReadProof<Block::Hash>, Error>;
	/// Returns the keys with prefix from a child storage,
	/// leave prefix empty to get all the keys.
	fn storage_keys(
		&self,
		block: Option<Block::Hash>,
		storage_key: PrefixedStorageKey,
		prefix: StorageKey,
	) -> Result<Vec<StorageKey>, Error>;
	/// Returns the keys with prefix from a child storage with pagination support.
	fn storage_keys_paged(
		&self,
		block: Option<Block::Hash>,
		storage_key: PrefixedStorageKey,
		prefix: Option<StorageKey>,
		count: u32,
		start_key: Option<StorageKey>,
	) -> Result<Vec<StorageKey>, Error>;
	/// Returns a child storage entry at a specific block's state.
	fn storage(
		&self,
		block: Option<Block::Hash>,
		storage_key: PrefixedStorageKey,
		key: StorageKey,
	) -> Result<Option<StorageData>, Error>;
	/// Returns child storage entries at a specific block's state.
	fn storage_entries(
		&self,
		block: Option<Block::Hash>,
		storage_key: PrefixedStorageKey,
		keys: Vec<StorageKey>,
	) -> Result<Vec<Option<StorageData>>, Error>;
	/// Returns the hash of a child storage entry at a block's state.
	fn storage_hash(
		&self,
		block: Option<Block::Hash>,
		storage_key: PrefixedStorageKey,
		key: StorageKey,
	) -> Result<Option<Block::Hash>, Error>;
	/// Returns the size of a child storage entry at a block's state.
	///
	/// Default implementation: the encoded length of the value returned by
	/// [`ChildStateBackend::storage`], or `None` when the entry is absent.
	fn storage_size(
		&self,
		block: Option<Block::Hash>,
		storage_key: PrefixedStorageKey,
		key: StorageKey,
	) -> Result<Option<u64>, Error> {
		self.storage(block, storage_key, key).map(|x| x.map(|x| x.0.len() as u64))
	}
}
/// Child state API with subscriptions support.
pub struct ChildState<Block, Client> {
	/// Boxed backend every child-state RPC method delegates to.
	backend: Box<dyn ChildStateBackend<Block, Client>>,
}
// Thin delegation layer: every method forwards to the boxed
// `ChildStateBackend`, only reordering arguments (the RPC signatures put the
// optional block hash last, the backend takes it first) and converting errors.
impl<Block, Client> ChildStateApiServer<Block::Hash> for ChildState<Block, Client>
where
	Block: BlockT + 'static,
	Client: Send + Sync + 'static,
{
	fn storage_keys(
		&self,
		storage_key: PrefixedStorageKey,
		key_prefix: StorageKey,
		block: Option<Block::Hash>,
	) -> Result<Vec<StorageKey>, Error> {
		self.backend.storage_keys(block, storage_key, key_prefix).map_err(Into::into)
	}
	fn storage_keys_paged(
		&self,
		storage_key: PrefixedStorageKey,
		prefix: Option<StorageKey>,
		count: u32,
		start_key: Option<StorageKey>,
		block: Option<Block::Hash>,
	) -> Result<Vec<StorageKey>, Error> {
		self.backend
			.storage_keys_paged(block, storage_key, prefix, count, start_key)
			.map_err(Into::into)
	}
	fn storage(
		&self,
		storage_key: PrefixedStorageKey,
		key: StorageKey,
		block: Option<Block::Hash>,
	) -> Result<Option<StorageData>, Error> {
		self.backend.storage(block, storage_key, key).map_err(Into::into)
	}
	fn storage_entries(
		&self,
		storage_key: PrefixedStorageKey,
		keys: Vec<StorageKey>,
		block: Option<Block::Hash>,
	) -> Result<Vec<Option<StorageData>>, Error> {
		self.backend.storage_entries(block, storage_key, keys).map_err(Into::into)
	}
	fn storage_hash(
		&self,
		storage_key: PrefixedStorageKey,
		key: StorageKey,
		block: Option<Block::Hash>,
	) -> Result<Option<Block::Hash>, Error> {
		self.backend.storage_hash(block, storage_key, key).map_err(Into::into)
	}
	fn storage_size(
		&self,
		storage_key: PrefixedStorageKey,
		key: StorageKey,
		block: Option<Block::Hash>,
	) -> Result<Option<u64>, Error> {
		self.backend.storage_size(block, storage_key, key).map_err(Into::into)
	}
	fn read_child_proof(
		&self,
		child_storage_key: PrefixedStorageKey,
		keys: Vec<StorageKey>,
		block: Option<Block::Hash>,
	) -> Result<ReadProof<Block::Hash>, Error> {
		self.backend
			.read_child_proof(block, child_storage_key, keys)
			.map_err(Into::into)
	}
}
/// Converts a blockchain (client) error into the RPC-level `Error::Client`.
fn client_err(err: sp_blockchain::Error) -> Error {
	Error::Client(Box::new(err))
}
@@ -0,0 +1,656 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! State API backend for full nodes.
use std::{collections::HashMap, marker::PhantomData, sync::Arc, time::Duration};
use super::{
client_err,
error::{Error, Result},
ChildStateBackend, StateBackend,
};
use crate::{
utils::{spawn_subscription_task, BoundedVecDeque, PendingSubscription},
DenyUnsafe, SubscriptionTaskExecutor,
};
use futures::{future, stream, StreamExt};
use jsonrpsee::{core::async_trait, types::ErrorObject, PendingSubscriptionSink};
use sc_client_api::{
Backend, BlockBackend, BlockchainEvents, CallExecutor, ExecutorProvider, ProofProvider,
StorageProvider,
};
use sc_rpc_api::state::ReadProof;
use sc_tracing::block::TracingExecuteBlock;
use sp_api::{CallApiAt, Metadata, ProvideRuntimeApi};
use sp_blockchain::{
CachedHeaderMetadata, Error as ClientError, HeaderBackend, HeaderMetadata,
Result as ClientResult,
};
use sp_core::{
storage::{
ChildInfo, ChildType, PrefixedStorageKey, StorageChangeSet, StorageData, StorageKey,
},
traits::CallContext,
Bytes,
};
use sp_runtime::traits::Block as BlockT;
use sp_version::RuntimeVersion;
/// The maximum time allowed for an RPC call when running without unsafe RPC enabled.
///
/// Used by `storage_size` to bound the potentially unbounded prefix scan when
/// the node only exposes safe RPC.
const MAXIMUM_SAFE_RPC_CALL_TIMEOUT: Duration = Duration::from_secs(30);

/// Ranges to query in `state_queryStorage`.
struct QueryStorageRange<Block: BlockT> {
	/// Hashes of all the blocks in the range, ordered from `from` to `to` (both inclusive).
	pub hashes: Vec<Block::Hash>,
}
/// State API backend for full nodes.
pub struct FullState<BE, Block: BlockT, Client> {
	/// Substrate client used to access storage, headers and the runtime.
	client: Arc<Client>,
	/// Executor on which subscription tasks are spawned.
	executor: SubscriptionTaskExecutor,
	/// Optional block-execution hook handed to the tracing block executor in `trace_block`.
	execute_block: Option<Arc<dyn TracingExecuteBlock<Block>>>,
	/// Ties the otherwise-unused `BE` backend type parameter to the struct.
	_phantom: PhantomData<BE>,
}
impl<BE, Block: BlockT, Client> FullState<BE, Block, Client>
where
	BE: Backend<Block>,
	Client: StorageProvider<Block, BE>
		+ HeaderBackend<Block>
		+ BlockBackend<Block>
		+ HeaderMetadata<Block, Error = sp_blockchain::Error>,
	Block: BlockT + 'static,
{
	/// Create new state API backend for full nodes.
	pub fn new(
		client: Arc<Client>,
		executor: SubscriptionTaskExecutor,
		execute_block: Option<Arc<dyn TracingExecuteBlock<Block>>>,
	) -> Self {
		Self { client, executor, execute_block, _phantom: PhantomData }
	}

	/// Returns given block hash or best block hash if None is passed.
	fn block_or_best(&self, hash: Option<Block::Hash>) -> ClientResult<Block::Hash> {
		Ok(hash.unwrap_or_else(|| self.client.info().best_hash))
	}

	/// Validates block range.
	///
	/// Resolves `to` to the best block when `None`, verifies that `from` is an
	/// ancestor of `to` on the same fork, and returns the hashes of all blocks
	/// from `from` to `to` (inclusive).
	fn query_storage_range(
		&self,
		from: Block::Hash,
		to: Option<Block::Hash>,
	) -> Result<QueryStorageRange<Block>> {
		let to = self
			.block_or_best(to)
			.map_err(|e| invalid_block::<Block>(from, to, e.to_string()))?;
		let invalid_block_err =
			|e: ClientError| invalid_block::<Block>(from, Some(to), e.to_string());
		let from_meta = self.client.header_metadata(from).map_err(invalid_block_err)?;
		let to_meta = self.client.header_metadata(to).map_err(invalid_block_err)?;
		// A valid range requires `from` to be at or below `to`.
		if from_meta.number > to_meta.number {
			return Err(invalid_block_range(
				&from_meta,
				&to_meta,
				"from number > to number".to_owned(),
			));
		}
		// check if we can get from `to` to `from` by going through parent_hashes.
		let from_number = from_meta.number;
		let hashes = {
			let mut hashes = vec![to_meta.hash];
			let mut last = to_meta.clone();
			// Walk parent links backwards from `to` until reaching `from`'s height.
			while last.number > from_number {
				let header_metadata = self
					.client
					.header_metadata(last.parent)
					.map_err(|e| invalid_block_range::<Block>(&last, &to_meta, e.to_string()))?;
				hashes.push(header_metadata.hash);
				last = header_metadata;
			}
			// If the walk didn't land on `from`, the endpoints are on different forks.
			if last.hash != from_meta.hash {
				return Err(invalid_block_range(
					&from_meta,
					&to_meta,
					"from and to are on different forks".to_owned(),
				));
			}
			// Hashes were collected newest-first; flip into `from`..=`to` order.
			hashes.reverse();
			hashes
		};
		Ok(QueryStorageRange { hashes })
	}

	/// Iterates through range.unfiltered_range and check each block for changes of keys' values.
	///
	/// `last_values` carries the last seen value per key across calls; a change is
	/// recorded when a key's value differs from that (or the key hasn't been seen yet).
	fn query_storage_unfiltered(
		&self,
		range: &QueryStorageRange<Block>,
		keys: &[StorageKey],
		last_values: &mut HashMap<StorageKey, Option<StorageData>>,
		changes: &mut Vec<StorageChangeSet<Block::Hash>>,
	) -> Result<()> {
		for block_hash in &range.hashes {
			let mut block_changes = StorageChangeSet { block: *block_hash, changes: Vec::new() };
			for key in keys {
				let (has_changed, data) = {
					let curr_data = self.client.storage(*block_hash, key).map_err(client_err)?;
					match last_values.get(key) {
						Some(prev_data) => (curr_data != *prev_data, curr_data),
						None => (true, curr_data),
					}
				};
				if has_changed {
					block_changes.changes.push((key.clone(), data.clone()));
				}
				last_values.insert(key.clone(), data);
			}
			// Only emit a change set for blocks where at least one key changed.
			if !block_changes.changes.is_empty() {
				changes.push(block_changes);
			}
		}
		Ok(())
	}
}
#[async_trait]
impl<BE, Block, Client> StateBackend<Block, Client> for FullState<BE, Block, Client>
where
	Block: BlockT + 'static,
	Block::Hash: Unpin,
	BE: Backend<Block> + 'static,
	Client: ExecutorProvider<Block>
		+ StorageProvider<Block, BE>
		+ ProofProvider<Block>
		+ HeaderBackend<Block>
		+ HeaderMetadata<Block, Error = sp_blockchain::Error>
		+ BlockchainEvents<Block>
		+ CallApiAt<Block>
		+ ProvideRuntimeApi<Block>
		+ BlockBackend<Block>
		+ Send
		+ Sync
		+ 'static,
	Client::Api: Metadata<Block>,
{
	/// Execute a runtime method `method` with `call_data` at the given block
	/// (best block when `None`) and return the raw result bytes.
	fn call(
		&self,
		block: Option<Block::Hash>,
		method: String,
		call_data: Bytes,
	) -> std::result::Result<Bytes, Error> {
		self.block_or_best(block)
			.and_then(|block| {
				self.client
					.executor()
					.call(block, &method, &call_data, CallContext::Offchain)
					.map(Into::into)
			})
			.map_err(client_err)
	}

	// TODO: This is horribly broken; either remove it, or make it streaming.
	/// Collect all storage keys matching `prefix` at the given block into a `Vec`.
	fn storage_keys(
		&self,
		block: Option<Block::Hash>,
		prefix: StorageKey,
	) -> std::result::Result<Vec<StorageKey>, Error> {
		// TODO: Remove the `.collect`.
		self.block_or_best(block)
			.and_then(|block| self.client.storage_keys(block, Some(&prefix), None))
			.map(|iter| iter.collect())
			.map_err(client_err)
	}

	// TODO: This is horribly broken; either remove it, or make it streaming.
	/// Collect all `(key, value)` pairs matching `prefix` at the given block into a `Vec`.
	fn storage_pairs(
		&self,
		block: Option<Block::Hash>,
		prefix: StorageKey,
	) -> std::result::Result<Vec<(StorageKey, StorageData)>, Error> {
		// TODO: Remove the `.collect`.
		self.block_or_best(block)
			.and_then(|block| self.client.storage_pairs(block, Some(&prefix), None))
			.map(|iter| iter.collect())
			.map_err(client_err)
	}

	/// Return up to `count` storage keys matching `prefix`, starting after `start_key`.
	fn storage_keys_paged(
		&self,
		block: Option<Block::Hash>,
		prefix: Option<StorageKey>,
		count: u32,
		start_key: Option<StorageKey>,
	) -> std::result::Result<Vec<StorageKey>, Error> {
		self.block_or_best(block)
			.and_then(|block| self.client.storage_keys(block, prefix.as_ref(), start_key.as_ref()))
			.map(|iter| iter.take(count as usize).collect())
			.map_err(client_err)
	}

	/// Return the storage value under `key` at the given block, if any.
	fn storage(
		&self,
		block: Option<Block::Hash>,
		key: StorageKey,
	) -> std::result::Result<Option<StorageData>, Error> {
		self.block_or_best(block)
			.and_then(|block| self.client.storage(block, &key))
			.map_err(client_err)
	}

	/// Return the size in bytes of the value under `key`, or — when `key` is a
	/// prefix rather than a concrete entry — the summed size of all values under it.
	async fn storage_size(
		&self,
		block: Option<Block::Hash>,
		key: StorageKey,
		deny_unsafe: DenyUnsafe,
	) -> std::result::Result<Option<u64>, Error> {
		let block = match self.block_or_best(block) {
			Ok(b) => b,
			Err(e) => return Err(client_err(e)),
		};
		let client = self.client.clone();
		// Cap the potentially unbounded prefix scan when only safe RPC is exposed.
		let timeout = match deny_unsafe {
			DenyUnsafe::Yes => Some(MAXIMUM_SAFE_RPC_CALL_TIMEOUT),
			DenyUnsafe::No => None,
		};
		super::utils::spawn_blocking_with_timeout(timeout, move |is_timed_out| {
			// Does the key point to a concrete entry in the database?
			match client.storage(block, &key) {
				Ok(Some(d)) => return Ok(Ok(Some(d.0.len() as u64))),
				Err(e) => return Ok(Err(client_err(e))),
				Ok(None) => {},
			}
			// The key doesn't point to anything, so it's probably a prefix.
			let iter = match client.storage_keys(block, Some(&key), None).map_err(client_err) {
				Ok(iter) => iter,
				Err(e) => return Ok(Err(e)),
			};
			let mut sum = 0;
			for storage_key in iter {
				let value = client.storage(block, &storage_key).ok().flatten().unwrap_or_default();
				sum += value.0.len() as u64;
				// Bail out between entries if the timeout elapsed.
				is_timed_out.check_if_timed_out()?;
			}
			if sum > 0 {
				Ok(Ok(Some(sum)))
			} else {
				Ok(Ok(None))
			}
		})
		.await
		.map_err(|error| Error::Client(Box::new(error)))?
	}

	/// Return the hash of the storage value under `key` at the given block, if any.
	fn storage_hash(
		&self,
		block: Option<Block::Hash>,
		key: StorageKey,
	) -> std::result::Result<Option<Block::Hash>, Error> {
		self.block_or_best(block)
			.and_then(|block| self.client.storage_hash(block, &key))
			.map_err(client_err)
	}

	/// Return the runtime metadata (SCALE-encoded bytes) at the given block.
	fn metadata(&self, block: Option<Block::Hash>) -> std::result::Result<Bytes, Error> {
		self.block_or_best(block).map_err(client_err).and_then(|block| {
			self.client
				.runtime_api()
				.metadata(block)
				.map(Into::into)
				.map_err(|e| Error::Client(Box::new(e)))
		})
	}

	/// Return the runtime version at the given block.
	fn runtime_version(
		&self,
		block: Option<Block::Hash>,
	) -> std::result::Result<RuntimeVersion, Error> {
		self.block_or_best(block).map_err(client_err).and_then(|block| {
			self.client.runtime_version_at(block).map_err(|e| Error::Client(Box::new(e)))
		})
	}

	/// Return the change sets of `keys` over the block range `from`..=`to`
	/// (`to` defaults to the best block).
	fn query_storage(
		&self,
		from: Block::Hash,
		to: Option<Block::Hash>,
		keys: Vec<StorageKey>,
	) -> std::result::Result<Vec<StorageChangeSet<Block::Hash>>, Error> {
		let call_fn = move || {
			let range = self.query_storage_range(from, to)?;
			let mut changes = Vec::new();
			let mut last_values = HashMap::new();
			self.query_storage_unfiltered(&range, &keys, &mut last_values, &mut changes)?;
			Ok(changes)
		};
		call_fn()
	}

	/// Return the values of `keys` at a single block (best block when `at` is `None`),
	/// as a one-block `query_storage` range.
	fn query_storage_at(
		&self,
		keys: Vec<StorageKey>,
		at: Option<Block::Hash>,
	) -> std::result::Result<Vec<StorageChangeSet<Block::Hash>>, Error> {
		let at = at.unwrap_or_else(|| self.client.info().best_hash);
		self.query_storage(at, Some(at), keys)
	}

	/// Return a storage read proof for `keys` at the given block.
	fn read_proof(
		&self,
		block: Option<Block::Hash>,
		keys: Vec<StorageKey>,
	) -> std::result::Result<ReadProof<Block::Hash>, Error> {
		self.block_or_best(block)
			.and_then(|block| {
				self.client
					.read_proof(block, &mut keys.iter().map(|key| key.0.as_ref()))
					.map(|proof| proof.into_iter_nodes().map(|node| node.into()).collect())
					.map(|proof| ReadProof { at: block, proof })
			})
			.map_err(client_err)
	}

	/// Subscribe to runtime version changes: the current version is sent first,
	/// then a new item whenever a new best block carries a different version.
	fn subscribe_runtime_version(&self, pending: PendingSubscriptionSink) {
		let initial = match self
			.block_or_best(None)
			.and_then(|block| self.client.runtime_version_at(block).map_err(Into::into))
			.map_err(|e| Error::Client(Box::new(e)))
		{
			Ok(initial) => initial,
			Err(e) => {
				// Could not read the current version: reject the subscription.
				spawn_subscription_task(&self.executor, pending.reject(e));
				return;
			},
		};
		let mut previous_version = initial.clone();
		let client = self.client.clone();
		// A stream of new versions
		let version_stream = client
			.import_notification_stream()
			.filter(|n| future::ready(n.is_new_best))
			.filter_map(move |n| {
				let version =
					client.runtime_version_at(n.hash).map_err(|e| Error::Client(Box::new(e)));
				// Only emit when the version differs from the last one sent.
				match version {
					Ok(version) if version != previous_version => {
						previous_version = version.clone();
						future::ready(Some(version))
					},
					_ => future::ready(None),
				}
			});
		let stream = futures::stream::once(future::ready(initial)).chain(version_stream);
		spawn_subscription_task(
			&self.executor,
			PendingSubscription::from(pending).pipe_from_stream(stream, BoundedVecDeque::default()),
		);
	}

	/// Subscribe to storage changes for `keys`; a wildcard subscription
	/// (`keys == None`) requires unsafe RPC to be enabled.
	fn subscribe_storage(
		&self,
		pending: PendingSubscriptionSink,
		keys: Option<Vec<StorageKey>>,
		deny_unsafe: DenyUnsafe,
	) {
		// Wildcard subscriptions are rejected unless unsafe RPC is allowed.
		if keys.is_none() {
			if let Err(err) = deny_unsafe.check_if_safe() {
				spawn_subscription_task(&self.executor, pending.reject(ErrorObject::from(err)));
				return;
			}
		}
		let stream = match self.client.storage_changes_notification_stream(keys.as_deref(), None) {
			Ok(stream) => stream,
			Err(blockchain_err) => {
				spawn_subscription_task(
					&self.executor,
					pending.reject(Error::Client(Box::new(blockchain_err))),
				);
				return;
			},
		};
		// For concrete keys, first send their current values at the best block.
		let initial = stream::iter(keys.map(|keys| {
			let block = self.client.info().best_hash;
			let changes = keys
				.into_iter()
				.map(|key| {
					let v = self.client.storage(block, &key).ok().flatten();
					(key, v)
				})
				.collect();
			StorageChangeSet { block, changes }
		}));
		// Keep only entries whose `o_sk` (presumably the child-trie key) is `None`,
		// i.e. top-level storage changes.
		let storage_stream = stream.map(|storage_notif| StorageChangeSet {
			block: storage_notif.block,
			changes: storage_notif
				.changes
				.iter()
				.filter_map(|(o_sk, k, v)| o_sk.is_none().then(|| (k.clone(), v.cloned())))
				.collect(),
		});
		// Empty change sets are suppressed.
		let stream = initial
			.chain(storage_stream)
			.filter(|storage| future::ready(!storage.changes.is_empty()));
		spawn_subscription_task(
			&self.executor,
			PendingSubscription::from(pending).pipe_from_stream(stream, BoundedVecDeque::default()),
		);
	}

	/// Re-execute the given block with tracing enabled and return the collected trace.
	fn trace_block(
		&self,
		block: Block::Hash,
		targets: Option<String>,
		storage_keys: Option<String>,
		methods: Option<String>,
	) -> std::result::Result<sp_rpc::tracing::TraceBlockResponse, Error> {
		sc_tracing::block::BlockExecutor::new(
			self.client.clone(),
			block,
			targets,
			storage_keys,
			methods,
			self.execute_block.clone(),
		)
		.trace_block()
		.map_err(|e| invalid_block::<Block>(block, None, e.to_string()))
	}
}
impl<BE, Block, Client> ChildStateBackend<Block, Client> for FullState<BE, Block, Client>
where
	Block: BlockT + 'static,
	BE: Backend<Block> + 'static,
	Client: ExecutorProvider<Block>
		+ StorageProvider<Block, BE>
		+ ProofProvider<Block>
		+ HeaderBackend<Block>
		+ BlockBackend<Block>
		+ HeaderMetadata<Block, Error = sp_blockchain::Error>
		+ BlockchainEvents<Block>
		+ CallApiAt<Block>
		+ ProvideRuntimeApi<Block>
		+ Send
		+ Sync
		+ 'static,
	Client::Api: Metadata<Block>,
{
	/// Return a read proof for `keys` of the child trie identified by `storage_key`.
	fn read_child_proof(
		&self,
		block: Option<Block::Hash>,
		storage_key: PrefixedStorageKey,
		keys: Vec<StorageKey>,
	) -> std::result::Result<ReadProof<Block::Hash>, Error> {
		self.block_or_best(block)
			.and_then(|block| {
				// Decode the prefixed key into a default child-trie `ChildInfo`.
				let child_info = match ChildType::from_prefixed_key(&storage_key) {
					Some((ChildType::ParentKeyId, storage_key)) =>
						ChildInfo::new_default(storage_key),
					None => return Err(sp_blockchain::Error::InvalidChildStorageKey),
				};
				self.client
					.read_child_proof(
						block,
						&child_info,
						&mut keys.iter().map(|key| key.0.as_ref()),
					)
					.map(|proof| proof.into_iter_nodes().map(|node| node.into()).collect())
					.map(|proof| ReadProof { at: block, proof })
			})
			.map_err(client_err)
	}

	/// Collect all child-trie storage keys matching `prefix` at the given block.
	fn storage_keys(
		&self,
		block: Option<Block::Hash>,
		storage_key: PrefixedStorageKey,
		prefix: StorageKey,
	) -> std::result::Result<Vec<StorageKey>, Error> {
		// TODO: Remove the `.collect`.
		self.block_or_best(block)
			.and_then(|block| {
				// Decode the prefixed key into a default child-trie `ChildInfo`.
				let child_info = match ChildType::from_prefixed_key(&storage_key) {
					Some((ChildType::ParentKeyId, storage_key)) =>
						ChildInfo::new_default(storage_key),
					None => return Err(sp_blockchain::Error::InvalidChildStorageKey),
				};
				self.client.child_storage_keys(block, child_info, Some(&prefix), None)
			})
			.map(|iter| iter.collect())
			.map_err(client_err)
	}

	/// Return up to `count` child-trie keys matching `prefix`, starting after `start_key`.
	fn storage_keys_paged(
		&self,
		block: Option<Block::Hash>,
		storage_key: PrefixedStorageKey,
		prefix: Option<StorageKey>,
		count: u32,
		start_key: Option<StorageKey>,
	) -> std::result::Result<Vec<StorageKey>, Error> {
		self.block_or_best(block)
			.and_then(|block| {
				// Decode the prefixed key into a default child-trie `ChildInfo`.
				let child_info = match ChildType::from_prefixed_key(&storage_key) {
					Some((ChildType::ParentKeyId, storage_key)) =>
						ChildInfo::new_default(storage_key),
					None => return Err(sp_blockchain::Error::InvalidChildStorageKey),
				};
				self.client.child_storage_keys(
					block,
					child_info,
					prefix.as_ref(),
					start_key.as_ref(),
				)
			})
			.map(|iter| iter.take(count as usize).collect())
			.map_err(client_err)
	}

	/// Return the child-trie value under `key`, if any.
	fn storage(
		&self,
		block: Option<Block::Hash>,
		storage_key: PrefixedStorageKey,
		key: StorageKey,
	) -> std::result::Result<Option<StorageData>, Error> {
		self.block_or_best(block)
			.and_then(|block| {
				// Decode the prefixed key into a default child-trie `ChildInfo`.
				let child_info = match ChildType::from_prefixed_key(&storage_key) {
					Some((ChildType::ParentKeyId, storage_key)) =>
						ChildInfo::new_default(storage_key),
					None => return Err(sp_blockchain::Error::InvalidChildStorageKey),
				};
				self.client.child_storage(block, &child_info, &key)
			})
			.map_err(client_err)
	}

	/// Return the child-trie values for each of `keys`, in order; entries
	/// without a value yield `None`.
	fn storage_entries(
		&self,
		block: Option<Block::Hash>,
		storage_key: PrefixedStorageKey,
		keys: Vec<StorageKey>,
	) -> std::result::Result<Vec<Option<StorageData>>, Error> {
		let child_info = if let Some((ChildType::ParentKeyId, storage_key)) =
			ChildType::from_prefixed_key(&storage_key)
		{
			Arc::new(ChildInfo::new_default(storage_key))
		} else {
			return Err(client_err(sp_blockchain::Error::InvalidChildStorageKey));
		};
		let block = self.block_or_best(block).map_err(client_err)?;
		let client = self.client.clone();
		// Collecting into a `Result` short-circuits on the first read error.
		keys.into_iter()
			.map(move |key| {
				client.clone().child_storage(block, &child_info, &key).map_err(client_err)
			})
			.collect()
	}

	/// Return the hash of the child-trie value under `key`, if any.
	fn storage_hash(
		&self,
		block: Option<Block::Hash>,
		storage_key: PrefixedStorageKey,
		key: StorageKey,
	) -> std::result::Result<Option<Block::Hash>, Error> {
		self.block_or_best(block)
			.and_then(|block| {
				// Decode the prefixed key into a default child-trie `ChildInfo`.
				let child_info = match ChildType::from_prefixed_key(&storage_key) {
					Some((ChildType::ParentKeyId, storage_key)) =>
						ChildInfo::new_default(storage_key),
					None => return Err(sp_blockchain::Error::InvalidChildStorageKey),
				};
				self.client.child_storage_hash(block, &child_info, &key)
			})
			.map_err(client_err)
	}
}
fn invalid_block_range<B: BlockT>(
from: &CachedHeaderMetadata<B>,
to: &CachedHeaderMetadata<B>,
details: String,
) -> Error {
let to_string = |h: &CachedHeaderMetadata<B>| format!("{} ({:?})", h.number, h.hash);
Error::InvalidBlockRange { from: to_string(from), to: to_string(to), details }
}
fn invalid_block<B: BlockT>(from: B::Hash, to: Option<B::Hash>, details: String) -> Error {
Error::InvalidBlockRange { from: format!("{:?}", from), to: format!("{:?}", to), details }
}
+540
View File
@@ -0,0 +1,540 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use self::error::Error;
use super::*;
use crate::testing::{allow_unsafe, test_executor, timeout_secs};
use assert_matches::assert_matches;
use futures::executor;
use jsonrpsee::{core::EmptyServerParams as EmptyParams, MethodsError as RpcError};
use sc_block_builder::BlockBuilderBuilder;
use sp_consensus::BlockOrigin;
use sp_core::{hash::H256, storage::ChildInfo};
use std::sync::Arc;
use substrate_test_runtime_client::{
prelude::*,
runtime::{ExtrinsicBuilder, Transfer},
};
// Storage key of the default child trie used throughout these tests.
const STORAGE_KEY: &[u8] = b"child";

/// Returns the prefixed storage key identifying the test child trie.
fn prefixed_storage_key() -> PrefixedStorageKey {
	let child_info = ChildInfo::new_default(STORAGE_KEY);
	child_info.prefixed_storage_key()
}
// Checks `storage`, `storage_hash`, `storage_size` (both concrete key and prefix)
// and child `storage` against state seeded at genesis.
#[tokio::test]
async fn should_return_storage() {
	const KEY: &[u8] = b":mock";
	const VALUE: &[u8] = b"hello world";
	const CHILD_VALUE: &[u8] = b"hello world !";
	let child_info = ChildInfo::new_default(STORAGE_KEY);
	let client = TestClientBuilder::new()
		.add_extra_storage(KEY.to_vec(), VALUE.to_vec())
		.add_extra_child_storage(&child_info, KEY.to_vec(), CHILD_VALUE.to_vec())
		// similar to a map with two keys
		.add_extra_storage(b":map:acc1".to_vec(), vec![1, 2])
		.add_extra_storage(b":map:acc2".to_vec(), vec![1, 2, 3])
		.build();
	let genesis_hash = client.genesis_hash();
	let (client, child) = new_full(Arc::new(client), test_executor(), None);
	let key = StorageKey(KEY.to_vec());
	let ext = allow_unsafe();
	// Concrete key: value length matches what was stored.
	assert_eq!(
		client
			.storage(key.clone(), Some(genesis_hash).into())
			.map(|x| x.map(|x| x.0.len()))
			.unwrap()
			.unwrap() as usize,
		VALUE.len(),
	);
	assert_matches!(
		client.storage_hash(key.clone(), Some(genesis_hash).into()).map(|x| x.is_some()),
		Ok(true)
	);
	assert_eq!(
		client.storage_size(&ext, key.clone(), None).await.unwrap().unwrap() as usize,
		VALUE.len(),
	);
	// Prefix query: size is the sum over both `:map:*` entries.
	assert_eq!(
		client
			.storage_size(&ext, StorageKey(b":map".to_vec()), None)
			.await
			.unwrap()
			.unwrap() as usize,
		2 + 3,
	);
	assert_eq!(
		child
			.storage(prefixed_storage_key(), key, Some(genesis_hash).into())
			.map(|x| x.map(|x| x.0.len()))
			.unwrap()
			.unwrap() as usize,
		CHILD_VALUE.len(),
	);
}
// Checks child `storage_entries`: all values are returned in order, and a
// missing key yields `None` rather than an error.
#[tokio::test]
async fn should_return_storage_entries() {
	const KEY1: &[u8] = b":mock";
	const KEY2: &[u8] = b":turtle";
	const VALUE: &[u8] = b"hello world";
	const CHILD_VALUE1: &[u8] = b"hello world !";
	const CHILD_VALUE2: &[u8] = b"hello world !";
	let child_info = ChildInfo::new_default(STORAGE_KEY);
	let client = TestClientBuilder::new()
		.add_extra_storage(KEY1.to_vec(), VALUE.to_vec())
		.add_extra_child_storage(&child_info, KEY1.to_vec(), CHILD_VALUE1.to_vec())
		.add_extra_child_storage(&child_info, KEY2.to_vec(), CHILD_VALUE2.to_vec())
		.build();
	let genesis_hash = client.genesis_hash();
	let (_client, child) = new_full(Arc::new(client), test_executor(), None);
	let keys = &[StorageKey(KEY1.to_vec()), StorageKey(KEY2.to_vec())];
	assert_eq!(
		child
			.storage_entries(prefixed_storage_key(), keys.to_vec(), Some(genesis_hash).into())
			.map(|x| x.into_iter().map(|x| x.map(|x| x.0.len()).unwrap()).sum::<usize>())
			.unwrap(),
		CHILD_VALUE1.len() + CHILD_VALUE2.len()
	);
	// should fail if not all keys exist.
	let mut failing_keys = vec![StorageKey(b":soup".to_vec())];
	failing_keys.extend_from_slice(keys);
	assert_matches!(
		child
			.storage_entries(prefixed_storage_key(), failing_keys, Some(genesis_hash).into())
			.map(|x| x.iter().all(|x| x.is_some())),
		Ok(false)
	);
}
// Checks child `storage`, `storage_hash` and `storage_size` for a single
// child-trie entry seeded at genesis.
#[tokio::test]
async fn should_return_child_storage() {
	let child_info = ChildInfo::new_default(STORAGE_KEY);
	let client = Arc::new(
		substrate_test_runtime_client::TestClientBuilder::new()
			.add_child_storage(&child_info, "key", vec![42_u8])
			.build(),
	);
	let genesis_hash = client.genesis_hash();
	let (_client, child) = new_full(client, test_executor(), None);
	let child_key = prefixed_storage_key();
	let key = StorageKey(b"key".to_vec());
	assert_matches!(
		child.storage(
			child_key.clone(),
			key.clone(),
			Some(genesis_hash).into(),
		),
		Ok(Some(StorageData(ref d))) if d[0] == 42 && d.len() == 1
	);
	assert_matches!(
		child
			.storage_hash(child_key.clone(), key.clone(), Some(genesis_hash).into(),)
			.map(|x| x.is_some()),
		Ok(true)
	);
	assert_matches!(child.storage_size(child_key.clone(), key.clone(), None), Ok(Some(1)));
}
// Checks child `storage_entries` returns both stored values in request order,
// plus `storage_hash`/`storage_size` on the first key.
#[tokio::test]
async fn should_return_child_storage_entries() {
	let child_info = ChildInfo::new_default(STORAGE_KEY);
	let client = Arc::new(
		substrate_test_runtime_client::TestClientBuilder::new()
			.add_child_storage(&child_info, "key1", vec![42_u8])
			.add_child_storage(&child_info, "key2", vec![43_u8, 44])
			.build(),
	);
	let genesis_hash = client.genesis_hash();
	let (_client, child) = new_full(client, test_executor(), None);
	let child_key = prefixed_storage_key();
	let keys = vec![StorageKey(b"key1".to_vec()), StorageKey(b"key2".to_vec())];
	let res = child
		.storage_entries(child_key.clone(), keys.clone(), Some(genesis_hash).into())
		.unwrap();
	assert_matches!(
		res[0],
		Some(StorageData(ref d))
			if d[0] == 42 && d.len() == 1
	);
	assert_matches!(
		res[1],
		Some(StorageData(ref d))
			if d[0] == 43 && d[1] == 44 && d.len() == 2
	);
	assert_matches!(
		child
			.storage_hash(child_key.clone(), keys[0].clone(), Some(genesis_hash).into())
			.map(|x| x.is_some()),
		Ok(true)
	);
	assert_matches!(child.storage_size(child_key.clone(), keys[0].clone(), None), Ok(Some(1)));
}
// Calling a runtime method that does not exist surfaces as `Error::Client`.
#[tokio::test]
async fn should_call_contract() {
	let client = Arc::new(substrate_test_runtime_client::new());
	let genesis_hash = client.genesis_hash();
	let (client, _child) = new_full(client, test_executor(), None);
	assert_matches!(
		client.call("balanceOf".into(), Bytes(vec![1, 2, 3]), Some(genesis_hash).into()),
		Err(Error::Client(_))
	)
}
// A wildcard storage subscription receives a notification after a block
// carrying a storage-mutating transfer is imported.
#[tokio::test]
async fn should_notify_about_storage_changes() {
	let mut sub = {
		let client = Arc::new(substrate_test_runtime_client::new());
		let (api, _child) = new_full(client.clone(), test_executor(), None);
		let mut api_rpc = api.into_rpc();
		api_rpc.extensions_mut().insert(DenyUnsafe::No);
		let sub = api_rpc
			.subscribe_unbounded("state_subscribeStorage", EmptyParams::new())
			.await
			.unwrap();
		// Cause a change:
		let mut builder = BlockBuilderBuilder::new(&*client)
			.on_parent_block(client.chain_info().best_hash)
			.with_parent_block_number(client.chain_info().best_number)
			.build()
			.unwrap();
		builder
			.push_transfer(Transfer {
				from: Sr25519Keyring::Alice.into(),
				to: Sr25519Keyring::Ferdie.into(),
				amount: 42,
				nonce: 0,
			})
			.unwrap();
		let block = builder.build().unwrap().block;
		client.import(BlockOrigin::Own, block).await.unwrap();
		sub
	};
	// We should get a message back on our subscription about the storage change:
	// NOTE: previous versions of the subscription code used to return an empty value for the
	// "initial" storage change here
	assert_matches!(timeout_secs(1, sub.next::<StorageChangeSet<H256>>()).await, Ok(Some(_)));
}
// Subscribing with a concrete key yields two messages: the initial value of
// the key at the best block, then the change produced by the imported block.
#[tokio::test]
async fn should_send_initial_storage_changes_and_notifications() {
	let mut sub = {
		let client = Arc::new(substrate_test_runtime_client::new());
		let (api, _child) = new_full(client.clone(), test_executor(), None);
		// Storage key of Alice's account in the System.Account map
		// (twox128 pallet/item prefix + blake2_128_concat of the public key).
		let alice_balance_key = [
			sp_crypto_hashing::twox_128(b"System"),
			sp_crypto_hashing::twox_128(b"Account"),
			sp_crypto_hashing::blake2_128(&Sr25519Keyring::Alice.public()),
		]
		.concat()
		.iter()
		.chain(Sr25519Keyring::Alice.public().0.iter())
		.cloned()
		.collect::<Vec<u8>>();
		let mut api_rpc = api.into_rpc();
		api_rpc.extensions_mut().insert(DenyUnsafe::No);
		let sub = api_rpc
			.subscribe_unbounded(
				"state_subscribeStorage",
				[[StorageKey(alice_balance_key.to_vec())]],
			)
			.await
			.unwrap();
		let mut builder = BlockBuilderBuilder::new(&*client)
			.on_parent_block(client.chain_info().best_hash)
			.with_parent_block_number(client.chain_info().best_number)
			.build()
			.unwrap();
		builder
			.push_transfer(Transfer {
				from: Sr25519Keyring::Alice.into(),
				to: Sr25519Keyring::Ferdie.into(),
				amount: 42,
				nonce: 0,
			})
			.unwrap();
		let block = builder.build().unwrap().block;
		client.import(BlockOrigin::Own, block).await.unwrap();
		sub
	};
	// First the initial snapshot, then the notification for the imported block.
	assert_matches!(timeout_secs(1, sub.next::<StorageChangeSet<H256>>()).await, Ok(Some(_)));
	assert_matches!(timeout_secs(1, sub.next::<StorageChangeSet<H256>>()).await, Ok(Some(_)));
}
// Exercises `query_storage`/`query_storage_at` across a two-block chain:
// partial and full ranges, inverted ranges, unknown block hashes, and
// single-block queries.
#[tokio::test]
async fn should_query_storage() {
	async fn run_tests(client: Arc<TestClient>) {
		let (api, _child) = new_full(client.clone(), test_executor(), None);
		// Builds and imports one block; each key's mutation pattern over the two
		// calls (index 0 and 1) is described inline below.
		let add_block = |index| {
			let mut builder = BlockBuilderBuilder::new(&*client)
				.on_parent_block(client.chain_info().best_hash)
				.with_parent_block_number(client.chain_info().best_number)
				.build()
				.unwrap();
			// fake change: None -> None -> None
			builder
				.push(ExtrinsicBuilder::new_storage_change(vec![1], None).build())
				.unwrap();
			// fake change: None -> Some(value) -> Some(value)
			builder
				.push(ExtrinsicBuilder::new_storage_change(vec![2], Some(vec![2])).build())
				.unwrap();
			// actual change: None -> Some(value) -> None
			builder
				.push(
					ExtrinsicBuilder::new_storage_change(
						vec![3],
						if index == 0 { Some(vec![3]) } else { None },
					)
					.build(),
				)
				.unwrap();
			// actual change: None -> Some(value)
			builder
				.push(
					ExtrinsicBuilder::new_storage_change(
						vec![4],
						if index == 0 { None } else { Some(vec![4]) },
					)
					.build(),
				)
				.unwrap();
			// actual change: Some(value1) -> Some(value2)
			builder
				.push(
					ExtrinsicBuilder::new_storage_change(vec![5], Some(vec![index as u8])).build(),
				)
				.unwrap();
			let block = builder.build().unwrap().block;
			let hash = block.header.hash();
			executor::block_on(client.import(BlockOrigin::Own, block)).unwrap();
			hash
		};
		let block1_hash = add_block(0);
		let block2_hash = add_block(1);
		let genesis_hash = client.genesis_hash();
		// Expected change sets for genesis..=block1.
		let mut expected = vec![
			StorageChangeSet {
				block: genesis_hash,
				changes: vec![
					(StorageKey(vec![1]), None),
					(StorageKey(vec![2]), None),
					(StorageKey(vec![3]), None),
					(StorageKey(vec![4]), None),
					(StorageKey(vec![5]), None),
				],
			},
			StorageChangeSet {
				block: block1_hash,
				changes: vec![
					(StorageKey(vec![2]), Some(StorageData(vec![2]))),
					(StorageKey(vec![3]), Some(StorageData(vec![3]))),
					(StorageKey(vec![5]), Some(StorageData(vec![0]))),
				],
			},
		];
		let ext = allow_unsafe();
		// Query changes only up to block1
		let keys = (1..6).map(|k| StorageKey(vec![k])).collect::<Vec<_>>();
		let result = api.query_storage(&ext, keys.clone(), genesis_hash, Some(block1_hash).into());
		assert_eq!(result.unwrap(), expected);
		// Query all changes
		let result = api.query_storage(&ext, keys.clone(), genesis_hash, None.into());
		expected.push(StorageChangeSet {
			block: block2_hash,
			changes: vec![
				(StorageKey(vec![3]), None),
				(StorageKey(vec![4]), Some(StorageData(vec![4]))),
				(StorageKey(vec![5]), Some(StorageData(vec![1]))),
			],
		});
		assert_eq!(result.unwrap(), expected);
		// Query changes up to block2.
		let result = api.query_storage(&ext, keys.clone(), genesis_hash, Some(block2_hash));
		assert_eq!(result.unwrap(), expected);
		// Inverted range.
		assert_matches!(
			api.query_storage(&ext, keys.clone(), block1_hash, Some(genesis_hash)),
			Err(Error::InvalidBlockRange { from, to, details }) if from == format!("1 ({:?})", block1_hash) && to == format!("0 ({:?})", genesis_hash) && details == "from number > to number".to_owned()
		);
		let random_hash1 = H256::random();
		let random_hash2 = H256::random();
		// Invalid second hash.
		assert_matches!(
			api.query_storage(&ext, keys.clone(), genesis_hash, Some(random_hash1)),
			Err(Error::InvalidBlockRange { from, to, details }) if from == format!("{:?}", genesis_hash) && to == format!("{:?}", Some(random_hash1)) && details == format!(
				"UnknownBlock: Header was not found in the database: {:?}",
				random_hash1
			)
		);
		// Invalid first hash with Some other hash.
		assert_matches!(
			api.query_storage(&ext, keys.clone(), random_hash1, Some(genesis_hash)),
			Err(Error::InvalidBlockRange { from, to, details }) if from == format!("{:?}", random_hash1) && to == format!("{:?}", Some(genesis_hash)) && details == format!(
				"UnknownBlock: Header was not found in the database: {:?}",
				random_hash1
			)
		);
		// Invalid first hash with None.
		assert_matches!(
			api.query_storage(&ext, keys.clone(), random_hash1, None),
			Err(Error::InvalidBlockRange { from, to, details }) if from == format!("{:?}", random_hash1) && to == format!("{:?}", Some(block2_hash)) && details == format!(
				"UnknownBlock: Header was not found in the database: {:?}",
				random_hash1
			)
		);
		// Both hashes invalid.
		assert_matches!(
			api.query_storage(&ext, keys.clone(), random_hash1, Some(random_hash2)),
			Err(Error::InvalidBlockRange { from, to, details }) if from == format!("{:?}", random_hash1) && to == format!("{:?}", Some(random_hash2)) && details == format!(
				"UnknownBlock: Header was not found in the database: {:?}",
				random_hash1
			)
		);
		// single block range
		let result = api.query_storage_at(keys.clone(), Some(block1_hash));
		assert_eq!(
			result.unwrap(),
			vec![StorageChangeSet {
				block: block1_hash,
				changes: vec![
					(StorageKey(vec![1_u8]), None),
					(StorageKey(vec![2_u8]), Some(StorageData(vec![2_u8]))),
					(StorageKey(vec![3_u8]), Some(StorageData(vec![3_u8]))),
					(StorageKey(vec![4_u8]), None),
					(StorageKey(vec![5_u8]), Some(StorageData(vec![0_u8]))),
				]
			}]
		);
	}
	run_tests(Arc::new(substrate_test_runtime_client::new())).await;
	run_tests(Arc::new(TestClientBuilder::new().build())).await;
}
// Runtime version round-trips through serde and matches the expected JSON.
#[tokio::test]
async fn should_return_runtime_version() {
	let client = Arc::new(substrate_test_runtime_client::new());
	let (api, _child) = new_full(client.clone(), test_executor(), None);
	// it is basically json-encoded substrate_test_runtime_client::runtime::VERSION
	let result = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\
		\"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",5],\
		[\"0x37e397fc7c91f5e4\",2],[\"0xd2bc9897eed08f15\",3],[\"0x40fe3ad401f8959a\",6],\
		[\"0xbc9d89904f5b923f\",1],[\"0xc6e9a76309f39b09\",2],[\"0xdd718d5cc53262d4\",1],\
		[\"0xcbca25e39f142387\",2],[\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],\
		[\"0xed99c5acb25eedf5\",3],[\"0xfbc577b9d747efd6\",1]],\"transactionVersion\":1,\"systemVersion\":1,\
		\"stateVersion\":1}";
	let runtime_version = api.runtime_version(None.into()).unwrap();
	let serialized = serde_json::to_string(&runtime_version).unwrap();
	pretty_assertions::assert_eq!(serialized, result);
	// Deserializing the expected JSON must reproduce the same value.
	let deserialized: RuntimeVersion = serde_json::from_str(result).unwrap();
	assert_eq!(deserialized, runtime_version);
}
// A runtime-version subscription delivers the current version immediately.
#[tokio::test]
async fn should_notify_on_runtime_version_initially() {
	let mut sub = {
		let client = Arc::new(substrate_test_runtime_client::new());
		let (api, _child) = new_full(client, test_executor(), None);
		let mut api_rpc = api.into_rpc();
		api_rpc.extensions_mut().insert(DenyUnsafe::No);
		let sub = api_rpc
			.subscribe_unbounded("state_subscribeRuntimeVersion", EmptyParams::new())
			.await
			.unwrap();
		sub
	};
	// assert initial version sent.
	assert_matches!(timeout_secs(10, sub.next::<RuntimeVersion>()).await, Ok(Some(_)));
}
// A 0x-prefixed hex JSON string deserializes into a 32-byte `StorageKey`.
#[test]
fn should_deserialize_storage_key() {
	let k = "\"0x7f864e18e3dd8b58386310d2fe0919eef27c6e558564b7f67f22d99d20f587b\"";
	let k: StorageKey = serde_json::from_str(k).unwrap();
	assert_eq!(k.0.len(), 32);
}
// Without a key list, `state_subscribeStorage` must be rejected when unsafe
// RPC is denied.
#[tokio::test]
async fn wildcard_storage_subscriptions_are_rpc_unsafe() {
	let client = Arc::new(substrate_test_runtime_client::new());
	let (api, _child) = new_full(client, test_executor(), None);
	let mut api_rpc = api.into_rpc();
	api_rpc.extensions_mut().insert(DenyUnsafe::Yes);
	let err = api_rpc.subscribe_unbounded("state_subscribeStorage", EmptyParams::new()).await;
	assert_matches!(err, Err(RpcError::JsonRpc(e)) if e.message() == "RPC call is unsafe to be called externally");
}
// With a concrete key list, `state_subscribeStorage` is permitted even when
// unsafe RPC is denied.
#[tokio::test]
async fn concrete_storage_subscriptions_are_rpc_safe() {
	let client = Arc::new(substrate_test_runtime_client::new());
	let (api, _child) = new_full(client, test_executor(), None);
	let mut api_rpc = api.into_rpc();
	api_rpc.extensions_mut().insert(DenyUnsafe::Yes);
	let key = StorageKey(STORAGE_KEY.to_vec());
	let sub = api_rpc.subscribe_unbounded("state_subscribeStorage", [[key]]).await;
	assert!(sub.is_ok());
}
+140
View File
@@ -0,0 +1,140 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use std::{
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
time::Duration,
};
/// Error returned when a task was cancelled because it exceeded its deadline.
#[derive(Debug)]
pub struct Timeout;

impl std::error::Error for Timeout {}

impl std::fmt::Display for Timeout {
	fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
		write!(f, "task has been running too long")
	}
}
/// Handle given to the blocking task so it can poll whether it has been
/// cancelled due to a timeout.
#[repr(transparent)]
pub struct IsTimedOut(Arc<AtomicBool>);

impl IsTimedOut {
	/// Returns `Err(Timeout)` once the task has run past its deadline,
	/// `Ok(())` otherwise.
	#[must_use]
	pub fn check_if_timed_out(&self) -> std::result::Result<(), Timeout> {
		match self.0.load(Ordering::Relaxed) {
			true => Err(Timeout),
			false => Ok(()),
		}
	}
}
/// Error for a task that either panicked (join error) or was cancelled due
/// to a timeout.
#[derive(Debug)]
pub enum SpawnWithTimeoutError {
	JoinError(tokio::task::JoinError),
	Timeout,
}

impl std::error::Error for SpawnWithTimeoutError {}

impl std::fmt::Display for SpawnWithTimeoutError {
	fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
		match self {
			Self::JoinError(join_error) => join_error.fmt(f),
			Self::Timeout => Timeout.fmt(f),
		}
	}
}
/// Guard that raises the shared "timed out" flag when dropped, signalling a
/// still-running callback (via `IsTimedOut`) that it should bail out.
struct CancelOnDrop(Arc<AtomicBool>);

impl Drop for CancelOnDrop {
	fn drop(&mut self) {
		let Self(flag) = self;
		flag.store(true, Ordering::Relaxed);
	}
}
/// Spawns a new blocking task with a given `timeout`.
///
/// The `callback` should continuously call [`IsTimedOut::check_if_timed_out`],
/// which will return an error once the task runs for longer than `timeout`.
///
/// If `timeout` is `None` then this works just as a regular `spawn_blocking`.
pub async fn spawn_blocking_with_timeout<R>(
	timeout: Option<Duration>,
	callback: impl FnOnce(IsTimedOut) -> std::result::Result<R, Timeout> + Send + 'static,
) -> Result<R, SpawnWithTimeoutError>
where
	R: Send + 'static,
{
	// Shared flag: flipped to `true` by `CancelOnDrop` when this function
	// returns, observed by the callback through its `IsTimedOut` handle.
	let is_timed_out_arc = Arc::new(AtomicBool::new(false));
	let is_timed_out = IsTimedOut(is_timed_out_arc.clone());
	// Whenever we leave this function (timeout branch included) the guard's
	// `Drop` sets the flag, telling a still-running callback to stop.
	let _cancel_on_drop = CancelOnDrop(is_timed_out_arc);
	let task = tokio::task::spawn_blocking(move || callback(is_timed_out));
	let result = if let Some(timeout) = timeout {
		tokio::select! {
			// Shouldn't really matter, but make sure the task is polled before the timeout,
			// in case the task finishes after the timeout and the timeout is really short.
			biased;
			task_result = task => task_result,
			_ = tokio::time::sleep(timeout) => Ok(Err(Timeout))
		}
	} else {
		task.await
	};
	// Flatten the two error layers: outer join error (panic/cancellation)
	// and inner `Timeout` reported by the callback itself.
	match result {
		Ok(Ok(result)) => Ok(result),
		Ok(Err(Timeout)) => Err(SpawnWithTimeoutError::Timeout),
		Err(error) => Err(SpawnWithTimeoutError::JoinError(error)),
	}
}
#[cfg(test)]
mod tests {
	use super::*;

	#[tokio::test]
	async fn spawn_blocking_with_timeout_works() {
		// The callback sleeps past the 100ms deadline, so by the time it
		// checks the flag it must observe the timeout and the caller must
		// receive `SpawnWithTimeoutError::Timeout`.
		// NOTE(review): timing-based — could flake on a heavily loaded host.
		let task: Result<(), SpawnWithTimeoutError> =
			spawn_blocking_with_timeout(Some(Duration::from_millis(100)), |is_timed_out| {
				std::thread::sleep(Duration::from_millis(200));
				is_timed_out.check_if_timed_out()?;
				unreachable!();
			})
			.await;
		assert_matches::assert_matches!(task, Err(SpawnWithTimeoutError::Timeout));

		// The callback finishes well inside the deadline and succeeds.
		let task = spawn_blocking_with_timeout(Some(Duration::from_millis(100)), |is_timed_out| {
			std::thread::sleep(Duration::from_millis(20));
			is_timed_out.check_if_timed_out()?;
			Ok(())
		})
		.await;
		assert_matches::assert_matches!(task, Ok(()));
	}
}
+144
View File
@@ -0,0 +1,144 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Substrate statement store API.
use codec::{Decode, Encode};
use jsonrpsee::{
core::{async_trait, RpcResult},
Extensions,
};
/// Re-export the API for backward compatibility.
pub use sc_rpc_api::statement::{error::Error, StatementApiServer};
use sp_core::Bytes;
use sp_statement_store::{StatementSource, SubmitResult};
use std::sync::Arc;
/// Statement store API
pub struct StatementStore {
	// Shared handle to the node's statement store implementation.
	store: Arc<dyn sp_statement_store::StatementStore>,
}

impl StatementStore {
	/// Create new instance of the statement store API.
	pub fn new(store: Arc<dyn sp_statement_store::StatementStore>) -> Self {
		StatementStore { store }
	}
}
#[async_trait]
impl StatementApiServer for StatementStore {
	/// Dump every statement in the store, SCALE-encoded.
	///
	/// Gated behind `check_if_safe`: only trusted (safe) connections may
	/// enumerate the full store contents.
	fn dump(&self, ext: &Extensions) -> RpcResult<Vec<Bytes>> {
		sc_rpc_api::check_if_safe(ext)?;
		let statements =
			self.store.statements().map_err(|e| Error::StatementStore(e.to_string()))?;
		// Drop the hash component of each entry; return encoded statements only.
		Ok(statements.into_iter().map(|(_, s)| s.encode().into()).collect())
	}

	/// Broadcast data of statements matching all of the given topics.
	fn broadcasts(&self, match_all_topics: Vec<[u8; 32]>) -> RpcResult<Vec<Bytes>> {
		Ok(self
			.store
			.broadcasts(&match_all_topics)
			.map_err(|e| Error::StatementStore(e.to_string()))?
			.into_iter()
			.map(Into::into)
			.collect())
	}

	/// Like `broadcasts`, additionally filtered by destination `dest`.
	fn posted(&self, match_all_topics: Vec<[u8; 32]>, dest: [u8; 32]) -> RpcResult<Vec<Bytes>> {
		Ok(self
			.store
			.posted(&match_all_topics, dest)
			.map_err(|e| Error::StatementStore(e.to_string()))?
			.into_iter()
			.map(Into::into)
			.collect())
	}

	/// Decrypted variant of `posted` (see the store implementation for the
	/// exact decryption semantics).
	fn posted_clear(
		&self,
		match_all_topics: Vec<[u8; 32]>,
		dest: [u8; 32],
	) -> RpcResult<Vec<Bytes>> {
		Ok(self
			.store
			.posted_clear(&match_all_topics, dest)
			.map_err(|e| Error::StatementStore(e.to_string()))?
			.into_iter()
			.map(Into::into)
			.collect())
	}

	/// `broadcasts`, but returning full encoded statements rather than data.
	fn broadcasts_stmt(&self, match_all_topics: Vec<[u8; 32]>) -> RpcResult<Vec<Bytes>> {
		Ok(self
			.store
			.broadcasts_stmt(&match_all_topics)
			.map_err(|e| Error::StatementStore(e.to_string()))?
			.into_iter()
			.map(Into::into)
			.collect())
	}

	/// `posted`, but returning full encoded statements rather than data.
	fn posted_stmt(
		&self,
		match_all_topics: Vec<[u8; 32]>,
		dest: [u8; 32],
	) -> RpcResult<Vec<Bytes>> {
		Ok(self
			.store
			.posted_stmt(&match_all_topics, dest)
			.map_err(|e| Error::StatementStore(e.to_string()))?
			.into_iter()
			.map(Into::into)
			.collect())
	}

	/// `posted_clear`, but returning full encoded statements rather than data.
	fn posted_clear_stmt(
		&self,
		match_all_topics: Vec<[u8; 32]>,
		dest: [u8; 32],
	) -> RpcResult<Vec<Bytes>> {
		Ok(self
			.store
			.posted_clear_stmt(&match_all_topics, dest)
			.map_err(|e| Error::StatementStore(e.to_string()))?
			.into_iter()
			.map(Into::into)
			.collect())
	}

	/// Decode a SCALE-encoded statement and submit it to the store.
	///
	/// `New` and `Known` both map to success; every other outcome becomes an
	/// `Error::StatementStore` with a human-readable reason.
	fn submit(&self, encoded: Bytes) -> RpcResult<()> {
		let statement = Decode::decode(&mut &*encoded)
			.map_err(|e| Error::StatementStore(format!("Error decoding statement: {:?}", e)))?;
		match self.store.submit(statement, StatementSource::Local) {
			SubmitResult::New(_) | SubmitResult::Known => Ok(()),
			// `KnownExpired` should not happen. Expired statements submitted with
			// `StatementSource::Rpc` should be renewed.
			// NOTE(review): the call above passes `StatementSource::Local` —
			// confirm whether the comment or the argument is the intended one.
			SubmitResult::KnownExpired =>
				Err(Error::StatementStore("Submitted an expired statement.".into()).into()),
			SubmitResult::Bad(e) => Err(Error::StatementStore(e.into()).into()),
			SubmitResult::Ignored => Err(Error::StatementStore("Store is full.".into()).into()),
			SubmitResult::InternalError(e) => Err(Error::StatementStore(e.to_string()).into()),
		}
	}

	/// Remove the statement with the given hash from the store.
	fn remove(&self, hash: [u8; 32]) -> RpcResult<()> {
		Ok(self.store.remove(&hash).map_err(|e| Error::StatementStore(e.to_string()))?)
	}
}
+190
View File
@@ -0,0 +1,190 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Substrate system API.
#[cfg(test)]
mod tests;
use futures::channel::oneshot;
use jsonrpsee::{
core::{async_trait, JsonValue},
Extensions,
};
use sc_rpc_api::check_if_safe;
use sc_tracing::logging;
use sc_utils::mpsc::TracingUnboundedSender;
use sp_runtime::traits::{self, Header as HeaderT};
pub use self::helpers::{Health, NodeRole, PeerInfo, SyncState, SystemInfo};
pub use sc_rpc_api::system::*;
/// System API implementation
pub struct System<B: traits::Block> {
	// Static node/chain information served by the synchronous getters.
	info: SystemInfo,
	// Channel to the network worker answering the asynchronous requests.
	send_back: TracingUnboundedSender<Request<B>>,
}

/// Request to be processed.
///
/// Each variant carries a oneshot sender through which the worker reading
/// this channel must send the answer back.
pub enum Request<B: traits::Block> {
	/// Must return the health of the network.
	Health(oneshot::Sender<Health>),
	/// Must return the base58-encoded local `PeerId`.
	LocalPeerId(oneshot::Sender<String>),
	/// Must return the string representation of the addresses we listen on, including the
	/// trailing `/p2p/`.
	LocalListenAddresses(oneshot::Sender<Vec<String>>),
	/// Must return information about the peers we are connected to.
	Peers(oneshot::Sender<Vec<PeerInfo<B::Hash, <B::Header as HeaderT>::Number>>>),
	/// Must return the state of the network.
	NetworkState(oneshot::Sender<serde_json::Value>),
	/// Must return any potential parse error.
	NetworkAddReservedPeer(String, oneshot::Sender<error::Result<()>>),
	/// Must return any potential parse error.
	NetworkRemoveReservedPeer(String, oneshot::Sender<error::Result<()>>),
	/// Must return the list of reserved peers
	NetworkReservedPeers(oneshot::Sender<Vec<String>>),
	/// Must return the node role.
	NodeRoles(oneshot::Sender<Vec<NodeRole>>),
	/// Must return the state of the node syncing.
	SyncState(oneshot::Sender<SyncState<<B::Header as HeaderT>::Number>>),
}
impl<B: traits::Block> System<B> {
	/// Creates a new `System` RPC handler.
	///
	/// Some requests are forwarded through `send_back`; the caller is
	/// responsible for reading from that channel and answering them.
	pub fn new(info: SystemInfo, send_back: TracingUnboundedSender<Request<B>>) -> Self {
		Self { info, send_back }
	}
}
#[async_trait]
impl<B: traits::Block> SystemApiServer<B::Hash, <B::Header as HeaderT>::Number> for System<B> {
	fn system_name(&self) -> Result<String, Error> {
		Ok(self.info.impl_name.clone())
	}

	fn system_version(&self) -> Result<String, Error> {
		Ok(self.info.impl_version.clone())
	}

	fn system_chain(&self) -> Result<String, Error> {
		Ok(self.info.chain_name.clone())
	}

	fn system_type(&self) -> Result<sc_chain_spec::ChainType, Error> {
		Ok(self.info.chain_type.clone())
	}

	fn system_properties(&self) -> Result<sc_chain_spec::Properties, Error> {
		Ok(self.info.properties.clone())
	}

	async fn system_health(&self) -> Result<Health, Error> {
		// All async handlers follow the same shape: hand a oneshot sender to
		// the worker via `send_back`, then await the answer. A dropped sender
		// (worker gone) surfaces as `Error::Internal`.
		let (tx, rx) = oneshot::channel();
		let _ = self.send_back.unbounded_send(Request::Health(tx));
		rx.await.map_err(|e| Error::Internal(e.to_string()))
	}

	async fn system_local_peer_id(&self) -> Result<String, Error> {
		let (tx, rx) = oneshot::channel();
		let _ = self.send_back.unbounded_send(Request::LocalPeerId(tx));
		rx.await.map_err(|e| Error::Internal(e.to_string()))
	}

	async fn system_local_listen_addresses(&self) -> Result<Vec<String>, Error> {
		let (tx, rx) = oneshot::channel();
		let _ = self.send_back.unbounded_send(Request::LocalListenAddresses(tx));
		rx.await.map_err(|e| Error::Internal(e.to_string()))
	}

	async fn system_peers(
		&self,
		ext: &Extensions,
	) -> Result<Vec<PeerInfo<B::Hash, <B::Header as HeaderT>::Number>>, Error> {
		// Unsafe RPC: restricted to trusted connections.
		check_if_safe(ext)?;
		let (tx, rx) = oneshot::channel();
		let _ = self.send_back.unbounded_send(Request::Peers(tx));
		rx.await.map_err(|e| Error::Internal(e.to_string()))
	}

	async fn system_network_state(&self, ext: &Extensions) -> Result<JsonValue, Error> {
		check_if_safe(ext)?;
		let (tx, rx) = oneshot::channel();
		let _ = self.send_back.unbounded_send(Request::NetworkState(tx));
		rx.await.map_err(|e| Error::Internal(e.to_string()))
	}

	async fn system_add_reserved_peer(&self, ext: &Extensions, peer: String) -> Result<(), Error> {
		check_if_safe(ext)?;
		let (tx, rx) = oneshot::channel();
		let _ = self.send_back.unbounded_send(Request::NetworkAddReservedPeer(peer, tx));
		// Distinguish a parse failure reported by the worker from a dead channel.
		match rx.await {
			Ok(Ok(())) => Ok(()),
			Ok(Err(e)) => Err(e),
			Err(e) => Err(Error::Internal(e.to_string())),
		}
	}

	async fn system_remove_reserved_peer(
		&self,
		ext: &Extensions,
		peer: String,
	) -> Result<(), Error> {
		check_if_safe(ext)?;
		let (tx, rx) = oneshot::channel();
		let _ = self.send_back.unbounded_send(Request::NetworkRemoveReservedPeer(peer, tx));
		match rx.await {
			Ok(Ok(())) => Ok(()),
			Ok(Err(e)) => Err(e),
			Err(e) => Err(Error::Internal(e.to_string())),
		}
	}

	async fn system_reserved_peers(&self) -> Result<Vec<String>, Error> {
		let (tx, rx) = oneshot::channel();
		let _ = self.send_back.unbounded_send(Request::NetworkReservedPeers(tx));
		rx.await.map_err(|e| Error::Internal(e.to_string()))
	}

	async fn system_node_roles(&self) -> Result<Vec<NodeRole>, Error> {
		let (tx, rx) = oneshot::channel();
		let _ = self.send_back.unbounded_send(Request::NodeRoles(tx));
		rx.await.map_err(|e| Error::Internal(e.to_string()))
	}

	async fn system_sync_state(&self) -> Result<SyncState<<B::Header as HeaderT>::Number>, Error> {
		let (tx, rx) = oneshot::channel();
		let _ = self.send_back.unbounded_send(Request::SyncState(tx));
		rx.await.map_err(|e| Error::Internal(e.to_string()))
	}

	fn system_add_log_filter(&self, ext: &Extensions, directives: String) -> Result<(), Error> {
		check_if_safe(ext)?;
		logging::add_directives(&directives);
		// The error is already a `String`, so pass the variant constructor
		// directly instead of `|e| Error::Internal(e)` (clippy: redundant_closure).
		logging::reload_filter().map_err(Error::Internal)
	}

	fn system_reset_log_filter(&self, ext: &Extensions) -> Result<(), Error> {
		check_if_safe(ext)?;
		logging::reset_log_filter().map_err(Error::Internal)
	}
}
+423
View File
@@ -0,0 +1,423 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
use super::{helpers::SyncState, *};
use crate::DenyUnsafe;
use assert_matches::assert_matches;
use futures::prelude::*;
use jsonrpsee::{core::EmptyServerParams as EmptyParams, MethodsError as RpcError, RpcModule};
use sc_network::{self, config::Role, PeerId};
use sc_rpc_api::system::helpers::PeerInfo;
use sc_utils::mpsc::tracing_unbounded;
use sp_core::H256;
use std::{
env,
io::{BufRead, BufReader, Write},
process::{Command, Stdio},
thread,
};
use substrate_test_runtime_client::runtime::Block;
// Mock network status fed into the `api()` test harness below.
struct Status {
	// Number of connected peers reported by the mock worker.
	pub peers: usize,
	// Whether the node reports itself as syncing.
	pub is_syncing: bool,
	// Dev mode: the harness derives `should_have_peers = !is_dev`.
	pub is_dev: bool,
	// Peer id used for every entry returned by `system_peers`.
	pub peer_id: PeerId,
}

impl Default for Status {
	fn default() -> Status {
		Status { peer_id: PeerId::random(), peers: 0, is_syncing: false, is_dev: false }
	}
}
// Builds an RPC module around a mock `System`: a background thread plays the
// network worker, answering every `Request` with canned data derived from
// `Status`. Unsafe RPCs are allowed on the returned module (`DenyUnsafe::No`).
fn api<T: Into<Option<Status>>>(sync: T) -> RpcModule<System<Block>> {
	let status = sync.into().unwrap_or_default();
	let should_have_peers = !status.is_dev;
	let (tx, rx) = tracing_unbounded("rpc_system_tests", 10_000);
	// The worker thread drains requests until the sender side is dropped.
	thread::spawn(move || {
		futures::executor::block_on(rx.for_each(move |request| {
			match request {
				Request::Health(sender) => {
					let _ = sender.send(Health {
						peers: status.peers,
						is_syncing: status.is_syncing,
						should_have_peers,
					});
				},
				Request::LocalPeerId(sender) => {
					let _ =
						sender.send("QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string());
				},
				Request::LocalListenAddresses(sender) => {
					let _ = sender.send(vec![
						"/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string(),
						"/ip4/127.0.0.1/tcp/30334/ws/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string(),
					]);
				},
				Request::Peers(sender) => {
					// One identical entry per configured peer.
					let mut peers = vec![];
					for _peer in 0..status.peers {
						peers.push(PeerInfo {
							peer_id: status.peer_id.to_base58(),
							roles: format!("{}", Role::Full),
							best_hash: Default::default(),
							best_number: 1,
						});
					}
					let _ = sender.send(peers);
				},
				Request::NetworkState(sender) => {
					let _ = sender.send(
						serde_json::to_value(&sc_network::network_state::NetworkState {
							peer_id: String::new(),
							listened_addresses: Default::default(),
							external_addresses: Default::default(),
							connected_peers: Default::default(),
							not_connected_peers: Default::default(),
							peerset: serde_json::Value::Null,
						})
						.unwrap(),
					);
				},
				Request::NetworkAddReservedPeer(peer, sender) => {
					// Mirrors the real worker: reject addresses that fail to parse.
					let _ = match sc_network::config::parse_str_addr(&peer) {
						Ok(_) => sender.send(Ok(())),
						Err(s) =>
							sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))),
					};
				},
				Request::NetworkRemoveReservedPeer(peer, sender) => {
					// Removal takes a bare base58 peer id, not a full multiaddr.
					let _ = match peer.parse::<PeerId>() {
						Ok(_) => sender.send(Ok(())),
						Err(s) =>
							sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))),
					};
				},
				Request::NetworkReservedPeers(sender) => {
					let _ = sender
						.send(vec!["QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string()]);
				},
				Request::NodeRoles(sender) => {
					let _ = sender.send(vec![NodeRole::Authority]);
				},
				Request::SyncState(sender) => {
					let _ = sender.send(SyncState {
						starting_block: 1,
						current_block: 2,
						highest_block: 3,
					});
				},
			};
			future::ready(())
		}))
	});
	let mut module = System::new(
		SystemInfo {
			impl_name: "testclient".into(),
			impl_version: "0.2.0".into(),
			chain_name: "testchain".into(),
			properties: Default::default(),
			chain_type: Default::default(),
		},
		tx,
	)
	.into_rpc();
	module.extensions_mut().insert(DenyUnsafe::No);
	module
}
// The following tests call the synchronous `system_*` getters against the
// mock harness and check the static `SystemInfo` values set in `api()`.
#[tokio::test]
async fn system_name_works() {
	assert_eq!(
		api(None).call::<_, String>("system_name", EmptyParams::new()).await.unwrap(),
		"testclient".to_string(),
	);
}

#[tokio::test]
async fn system_version_works() {
	assert_eq!(
		api(None).call::<_, String>("system_version", EmptyParams::new()).await.unwrap(),
		"0.2.0".to_string(),
	);
}

#[tokio::test]
async fn system_chain_works() {
	assert_eq!(
		api(None).call::<_, String>("system_chain", EmptyParams::new()).await.unwrap(),
		"testchain".to_string(),
	);
}

#[tokio::test]
async fn system_properties_works() {
	// The harness sets `properties: Default::default()` => empty JSON object.
	type Map = serde_json::map::Map<String, serde_json::Value>;
	assert_eq!(
		api(None).call::<_, Map>("system_properties", EmptyParams::new()).await.unwrap(),
		Map::new()
	);
}

#[tokio::test]
async fn system_type_works() {
	// The default chain type serializes as "Live" per this expectation.
	assert_eq!(
		api(None)
			.call::<_, String>("system_chainType", EmptyParams::new())
			.await
			.unwrap(),
		"Live".to_owned(),
	);
}
#[tokio::test]
async fn system_health() {
	// Exercises the `should_have_peers = !is_dev` rule across combinations
	// of dev mode, peer count and syncing state.
	assert_eq!(
		api(None).call::<_, Health>("system_health", EmptyParams::new()).await.unwrap(),
		Health { peers: 0, is_syncing: false, should_have_peers: true },
	);
	assert_eq!(
		api(Status { peer_id: PeerId::random(), peers: 5, is_syncing: true, is_dev: true })
			.call::<_, Health>("system_health", EmptyParams::new())
			.await
			.unwrap(),
		Health { peers: 5, is_syncing: true, should_have_peers: false },
	);
	assert_eq!(
		api(Status { peer_id: PeerId::random(), peers: 5, is_syncing: false, is_dev: false })
			.call::<_, Health>("system_health", EmptyParams::new())
			.await
			.unwrap(),
		Health { peers: 5, is_syncing: false, should_have_peers: true },
	);
	assert_eq!(
		api(Status { peer_id: PeerId::random(), peers: 0, is_syncing: false, is_dev: true })
			.call::<_, Health>("system_health", EmptyParams::new())
			.await
			.unwrap(),
		Health { peers: 0, is_syncing: false, should_have_peers: false },
	);
}
#[tokio::test]
async fn system_local_peer_id_works() {
	assert_eq!(
		api(None)
			.call::<_, String>("system_localPeerId", EmptyParams::new())
			.await
			.unwrap(),
		"QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_owned()
	);
}

#[tokio::test]
async fn system_local_listen_addresses_works() {
	assert_eq!(
		api(None)
			.call::<_, Vec<String>>("system_localListenAddresses", EmptyParams::new())
			.await
			.unwrap(),
		vec![
			"/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV",
			"/ip4/127.0.0.1/tcp/30334/ws/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"
		]
	);
}

#[tokio::test]
async fn system_peers() {
	// One entry per configured peer; "FULL" must match the string the mock
	// worker produces via `format!("{}", Role::Full)`.
	let peer_id = PeerId::random();
	let peer_info: Vec<PeerInfo<H256, u64>> =
		api(Status { peer_id, peers: 1, is_syncing: false, is_dev: true })
			.call("system_peers", EmptyParams::new())
			.await
			.unwrap();
	assert_eq!(
		peer_info,
		vec![PeerInfo {
			peer_id: peer_id.to_base58(),
			roles: "FULL".into(),
			best_hash: Default::default(),
			best_number: 1u64,
		}]
	);
}
#[tokio::test]
async fn system_network_state() {
	use sc_network::network_state::NetworkState;
	// Round-trips the empty `NetworkState` the mock worker serializes.
	let network_state: NetworkState = api(None)
		.call("system_unstable_networkState", EmptyParams::new())
		.await
		.unwrap();
	assert_eq!(
		network_state,
		NetworkState {
			peer_id: String::new(),
			listened_addresses: Default::default(),
			external_addresses: Default::default(),
			connected_peers: Default::default(),
			not_connected_peers: Default::default(),
			peerset: serde_json::Value::Null,
		}
	);
}

#[tokio::test]
async fn system_node_roles() {
	let node_roles: Vec<NodeRole> =
		api(None).call("system_nodeRoles", EmptyParams::new()).await.unwrap();
	assert_eq!(node_roles, vec![NodeRole::Authority]);
}

#[tokio::test]
async fn system_sync_state() {
	// Matches the canned `SyncState { 1, 2, 3 }` sent by the mock worker.
	let sync_state: SyncState<i32> =
		api(None).call("system_syncState", EmptyParams::new()).await.unwrap();
	assert_eq!(sync_state, SyncState { starting_block: 1, current_block: 2, highest_block: 3 });
}
#[tokio::test]
async fn system_network_add_reserved() {
	// Adding expects a full multiaddr including the `/p2p/<peer-id>` suffix;
	// one without the suffix must be rejected with a parse error.
	let good_peer_id =
		["/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"];
	let _good: () = api(None)
		.call("system_addReservedPeer", good_peer_id)
		.await
		.expect("good peer id works");
	let bad_peer_id = ["/ip4/198.51.100.19/tcp/30333"];
	assert_matches!(
		api(None).call::<_, ()>("system_addReservedPeer", bad_peer_id).await,
		Err(RpcError::JsonRpc(err)) if err.message().contains("Peer id is missing from the address")
	);
}

#[tokio::test]
async fn system_network_remove_reserved() {
	// Removal expects a bare base58 peer id; a full multiaddr must fail to
	// parse as a `PeerId`.
	let _good_peer: () = api(None)
		.call("system_removeReservedPeer", ["QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"])
		.await
		.expect("call with good peer id works");
	let bad_peer_id =
		["/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"];
	assert_matches!(
		api(None).call::<_, String>("system_removeReservedPeer", bad_peer_id).await,
		Err(RpcError::JsonRpc(err)) if err.message().contains("base-58 decode error: provided string contained invalid character '/' at byte 0")
	);
}

#[tokio::test]
async fn system_network_reserved_peers() {
	let reserved_peers: Vec<String> =
		api(None).call("system_reservedPeers", EmptyParams::new()).await.unwrap();
	assert_eq!(reserved_peers, vec!["QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string()],);
}
// End-to-end check of `system_addLogFilter` / `system_resetLogFilter`.
//
// The test re-executes its own binary with `TEST_LOG_FILTER=1`; the child
// enters the block below, emitting one log line per target/level and
// reloading the filter on commands read from stdin, while the parent drives
// it and asserts on which lines reach the child's stderr.
#[test]
fn test_add_reset_log_filter() {
	const EXPECTED_BEFORE_ADD: &'static str = "EXPECTED_BEFORE_ADD";
	const EXPECTED_AFTER_ADD: &'static str = "EXPECTED_AFTER_ADD";
	const EXPECTED_WITH_TRACE: &'static str = "EXPECTED_WITH_TRACE";
	// Enter log generation / filter reload
	if std::env::var("TEST_LOG_FILTER").is_ok() {
		let mut builder = sc_tracing::logging::LoggerBuilder::new("test_before_add=debug");
		builder.with_log_reloading(true);
		builder.init().unwrap();
		for line in std::io::stdin().lock().lines() {
			let line = line.expect("Failed to read bytes");
			if line.contains("add_reload") {
				let filter = "test_after_add";
				let fut =
					async move { api(None).call::<_, ()>("system_addLogFilter", [filter]).await };
				futures::executor::block_on(fut).expect("`system_addLogFilter` failed");
			} else if line.contains("add_trace") {
				let filter = "test_before_add=trace";
				let fut =
					async move { api(None).call::<_, ()>("system_addLogFilter", [filter]).await };
				futures::executor::block_on(fut).expect("`system_addLogFilter (trace)` failed");
			} else if line.contains("reset") {
				let fut = async move {
					api(None).call::<_, ()>("system_resetLogFilter", EmptyParams::new()).await
				};
				futures::executor::block_on(fut).expect("`system_resetLogFilter` failed");
			} else if line.contains("exit") {
				return;
			}
			// Emit one line per target/level so the parent can observe which
			// directives are currently active.
			log::trace!(target: "test_before_add", "{}", EXPECTED_WITH_TRACE);
			log::debug!(target: "test_before_add", "{}", EXPECTED_BEFORE_ADD);
			log::debug!(target: "test_after_add", "{}", EXPECTED_AFTER_ADD);
		}
	}
	// Call this test again to enter the log generation / filter reload block
	let test_executable = env::current_exe().expect("Unable to get current executable!");
	let mut child_process = Command::new(test_executable)
		.env("TEST_LOG_FILTER", "1")
		.args(&["--nocapture", "test_add_reset_log_filter"])
		.stdin(Stdio::piped())
		.stderr(Stdio::piped())
		.spawn()
		.unwrap();
	let child_stderr = child_process.stderr.take().expect("Could not get child stderr");
	let mut child_out = BufReader::new(child_stderr);
	let mut child_in = child_process.stdin.take().expect("Could not get child stdin");
	// Reads one line of the child's stderr, blocking until it is available.
	let mut read_line = || {
		let mut line = String::new();
		child_out.read_line(&mut line).expect("Reading a line");
		line
	};
	// Initiate logs loop in child process
	child_in.write_all(b"\n").unwrap();
	assert!(read_line().contains(EXPECTED_BEFORE_ADD));
	// Initiate add directive & reload in child process
	child_in.write_all(b"add_reload\n").unwrap();
	assert!(read_line().contains(EXPECTED_BEFORE_ADD));
	assert!(read_line().contains(EXPECTED_AFTER_ADD));
	// Check that increasing the max log level works
	child_in.write_all(b"add_trace\n").unwrap();
	assert!(read_line().contains(EXPECTED_WITH_TRACE));
	assert!(read_line().contains(EXPECTED_BEFORE_ADD));
	assert!(read_line().contains(EXPECTED_AFTER_ADD));
	// Initiate logs filter reset in child process
	child_in.write_all(b"reset\n").unwrap();
	assert!(read_line().contains(EXPECTED_BEFORE_ADD));
	// Return from child process
	child_in.write_all(b"exit\n").unwrap();
	assert!(child_process.wait().expect("Error waiting for child process").success());
	// Check for EOF
	assert_eq!(child_out.read_line(&mut String::new()).unwrap(), 0);
}
+89
View File
@@ -0,0 +1,89 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! Testing utils used by the RPC tests.
use std::{future::Future, sync::Arc};
use jsonrpsee::Extensions;
use sc_rpc_api::DenyUnsafe;
/// A task executor backed by the ambient tokio runtime, for running RPC tests.
///
/// Warning: the tokio runtime must be initialized before calling this.
#[derive(Clone)]
pub struct TokioTestExecutor(tokio::runtime::Handle);

impl TokioTestExecutor {
	/// Create a new instance of `Self`.
	pub fn new() -> Self {
		let handle = tokio::runtime::Handle::current();
		Self(handle)
	}
}

impl Default for TokioTestExecutor {
	fn default() -> Self {
		TokioTestExecutor::new()
	}
}
impl sp_core::traits::SpawnNamed for TokioTestExecutor {
	fn spawn_blocking(
		&self,
		_name: &'static str,
		_group: Option<&'static str>,
		future: futures::future::BoxFuture<'static, ()>,
	) {
		// Drive the future to completion on a dedicated blocking thread; the
		// cloned handle lets that thread re-enter the runtime via `block_on`.
		let handle = self.0.clone();
		self.0.spawn_blocking(move || {
			handle.block_on(future);
		});
	}

	fn spawn(
		&self,
		_name: &'static str,
		_group: Option<&'static str>,
		future: futures::future::BoxFuture<'static, ()>,
	) {
		// Non-blocking tasks go straight onto the runtime.
		self.0.spawn(future);
	}
}
/// Builds a shared executor for tests.
pub fn test_executor() -> Arc<TokioTestExecutor> {
	Arc::new(TokioTestExecutor::new())
}
/// Concise helper: limit future `f` to at most `s` seconds.
pub fn timeout_secs<I, F: Future<Output = I>>(s: u64, f: F) -> tokio::time::Timeout<F> {
	let limit = std::time::Duration::from_secs(s);
	tokio::time::timeout(limit, f)
}
/// Builds RPC `Extensions` in which unsafe calls are denied.
pub fn deny_unsafe() -> Extensions {
	let mut extensions = Extensions::new();
	extensions.insert(DenyUnsafe::Yes);
	extensions
}

/// Builds RPC `Extensions` in which unsafe calls are allowed.
pub fn allow_unsafe() -> Extensions {
	let mut extensions = Extensions::new();
	extensions.insert(DenyUnsafe::No);
	extensions
}
+405
View File
@@ -0,0 +1,405 @@
// This file is part of Substrate.
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
//! JSON-RPC helpers.
use crate::SubscriptionTaskExecutor;
use futures::{
future::{self, Either, Fuse, FusedFuture},
Future, FutureExt, Stream, StreamExt, TryStream, TryStreamExt,
};
use jsonrpsee::{
types::SubscriptionId, DisconnectError, PendingSubscriptionSink, SubscriptionMessage,
SubscriptionSink,
};
use sp_runtime::Serialize;
use std::collections::VecDeque;
const DEFAULT_BUF_SIZE: usize = 16;
/// A trait representing a buffer which may or may not support
/// to replace items when the buffer is full.
pub trait Buffer {
/// The item type that the buffer holds.
type Item;
/// Push an item to the buffer.
///
/// Returns `Err` if the buffer doesn't support replacing older items
fn push(&mut self, item: Self::Item) -> Result<(), ()>;
/// Pop the next item from the buffer.
fn pop(&mut self) -> Option<Self::Item>;
}
/// A simple bounded buffer that will terminate the subscription if the buffer becomes full.
pub struct BoundedVecDeque<T> {
inner: VecDeque<T>,
max_cap: usize,
}
impl<T> Default for BoundedVecDeque<T> {
fn default() -> Self {
Self { inner: VecDeque::with_capacity(DEFAULT_BUF_SIZE), max_cap: DEFAULT_BUF_SIZE }
}
}
impl<T> BoundedVecDeque<T> {
/// Create a new bounded VecDeque.
pub fn new(cap: usize) -> Self {
Self { inner: VecDeque::with_capacity(cap), max_cap: cap }
}
}
impl<T> Buffer for BoundedVecDeque<T> {
type Item = T;
fn push(&mut self, item: Self::Item) -> Result<(), ()> {
if self.inner.len() >= self.max_cap {
Err(())
} else {
self.inner.push_back(item);
Ok(())
}
}
fn pop(&mut self) -> Option<T> {
self.inner.pop_front()
}
}
/// Fixed size ring buffer that replaces the oldest item when full.
#[derive(Debug)]
pub struct RingBuffer<T> {
inner: VecDeque<T>,
cap: usize,
}
impl<T> RingBuffer<T> {
	/// Create a new ring buffer that keeps at most `capacity` items,
	/// evicting the oldest entry on overflow.
	pub fn new(capacity: usize) -> Self {
		Self { cap: capacity, inner: VecDeque::with_capacity(capacity) }
	}
}
impl<T> Buffer for RingBuffer<T> {
	type Item = T;
	/// Append `item` at the back; when the buffer is already at capacity the
	/// oldest element is discarded first. This push never fails.
	fn push(&mut self, item: T) -> Result<(), ()> {
		let full = self.inner.len() >= self.cap;
		if full {
			// Make room by dropping the oldest entry.
			self.inner.pop_front();
		}
		self.inner.push_back(item);
		Ok(())
	}
	/// Remove and return the oldest buffered item, if any.
	fn pop(&mut self) -> Option<T> {
		self.inner.pop_front()
	}
}
/// A pending subscription.
///
/// Wraps a [`PendingSubscriptionSink`] that has not yet been accepted.
pub struct PendingSubscription(PendingSubscriptionSink);
impl From<PendingSubscriptionSink> for PendingSubscription {
	/// Wrap a raw pending sink without accepting it yet.
	fn from(pending: PendingSubscriptionSink) -> Self {
		Self(pending)
	}
}
impl PendingSubscription {
	/// Feed items to the subscription from the underlying stream
	/// with specified buffer strategy.
	///
	/// Polls the stream while waiting for the subscription to be accepted;
	/// items produced before acceptance are staged in `buf`. If `buf` refuses
	/// an item (it is full and does not evict), the subscription is dropped
	/// without ever being accepted.
	pub async fn pipe_from_stream<S, T, B>(self, mut stream: S, mut buf: B)
	where
		S: Stream<Item = T> + Unpin + Send + 'static,
		T: Serialize + Send + 'static,
		B: Buffer<Item = T>,
	{
		// Capture identifying info up-front for the log message below.
		let method = self.0.method_name().to_string();
		let conn_id = self.0.connection_id().0;
		let accept_fut = self.0.accept();
		futures::pin_mut!(accept_fut);
		// Poll the stream while waiting for the subscription to be accepted
		//
		// If the `max_cap` is exceeded then the subscription is dropped.
		let sink = loop {
			match future::select(accept_fut, stream.next()).await {
				// Accepted: continue with an active subscription below.
				Either::Left((Ok(sink), _)) => break sink,
				// A stream item arrived before acceptance: stage it in the buffer.
				Either::Right((Some(msg), f)) => {
					if buf.push(msg).is_err() {
						log::debug!(target: "rpc", "Subscription::accept buffer full for subscription={method} conn_id={conn_id}; dropping subscription");
						return;
					}
					accept_fut = f;
				},
				// The connection was closed or the stream was closed.
				_ => return,
			}
		};
		// Hand over to the active-subscription pipe, keeping the staged items.
		Subscription(sink).pipe_from_stream(stream, buf).await
	}
}
/// An active subscription.
///
/// Wraps an accepted [`SubscriptionSink`] that messages can be sent on.
#[derive(Clone, Debug)]
pub struct Subscription(SubscriptionSink);
impl From<SubscriptionSink> for Subscription {
	/// Wrap an already-accepted subscription sink.
	fn from(accepted: SubscriptionSink) -> Self {
		Self(accepted)
	}
}
impl Subscription {
	/// Feed items to the subscription from the underlying stream
	/// with specified buffer strategy.
	///
	/// Infallible variant of [`Self::pipe_from_try_stream`]: items are wrapped
	/// in `Ok` so no stream error can ever occur.
	pub async fn pipe_from_stream<S, T, B>(&self, stream: S, buf: B)
	where
		S: Stream<Item = T> + Unpin,
		T: Serialize + Send,
		B: Buffer<Item = T>,
	{
		self.pipe_from_try_stream(stream.map(Ok::<T, ()>), buf)
			.await
			.expect("No Err will be ever encountered.qed");
	}
	/// Feed items to the subscription from the underlying stream
	/// with specified buffer strategy.
	///
	/// Runs until the stream finishes, the stream yields an error, the buffer
	/// overflows, or the subscription is closed by the client. Sending and
	/// reading from the stream happen concurrently: new items are staged in
	/// `buf` while a previous send is still in flight.
	pub async fn pipe_from_try_stream<S, T, B, E>(&self, mut stream: S, mut buf: B) -> Result<(), E>
	where
		S: TryStream<Ok = T, Error = E> + Unpin,
		T: Serialize + Send,
		B: Buffer<Item = T>,
	{
		// `next_fut` is the in-flight send; `Fuse::terminated()` means "idle".
		let mut next_fut = Box::pin(Fuse::terminated());
		let mut next_item = stream.try_next();
		let closed = self.0.closed();
		futures::pin_mut!(closed);
		loop {
			// No send in flight: start sending the next buffered item, if any.
			if next_fut.is_terminated() {
				if let Some(v) = buf.pop() {
					let val = self.to_sub_message(&v);
					next_fut.set(async { self.0.send(val).await }.fuse());
				}
			}
			match future::select(closed, future::select(next_fut, next_item)).await {
				// Send operation finished.
				Either::Right((Either::Left((_, n)), c)) => {
					next_item = n;
					closed = c;
					// Mark the send slot idle so the next buffered item is picked up.
					next_fut = Box::pin(Fuse::terminated());
				},
				// New item from the stream
				Either::Right((Either::Right((Ok(Some(v)), n)), c)) => {
					if buf.push(v).is_err() {
						log::debug!(
							target: "rpc",
							"Subscription buffer full for subscription={} conn_id={}; dropping subscription",
							self.0.method_name(),
							self.0.connection_id().0
						);
						return Ok(());
					}
					next_fut = n;
					closed = c;
					next_item = stream.try_next();
				},
				// Error occurred while processing the stream.
				//
				// Terminate the stream.
				Either::Right((Either::Right((Err(e), _)), _)) => return Err(e),
				// Stream "finished".
				//
				// Process remaining items and terminate.
				Either::Right((Either::Right((Ok(None), pending_fut)), _)) => {
					// Let an in-flight send complete first; bail out if it
					// failed (the client went away).
					if !pending_fut.is_terminated() && pending_fut.await.is_err() {
						return Ok(());
					}
					// Drain whatever is still buffered.
					while let Some(v) = buf.pop() {
						if self.send(&v).await.is_err() {
							return Ok(());
						}
					}
					return Ok(());
				},
				// Subscription was closed.
				Either::Left(_) => return Ok(()),
			}
		}
	}
	/// Send a message on the subscription.
	///
	/// Returns `Err` if the connection was closed.
	pub async fn send(&self, result: &impl Serialize) -> Result<(), DisconnectError> {
		self.0.send(self.to_sub_message(result)).await
	}
	/// Get the subscription id.
	pub fn subscription_id(&self) -> SubscriptionId<'_> {
		self.0.subscription_id()
	}
	/// Completes when the subscription is closed.
	pub async fn closed(&self) {
		self.0.closed().await
	}
	/// Convert a result to a subscription message.
	///
	/// Panics only if serialization fails, which is treated as a bug.
	fn to_sub_message(&self, result: &impl Serialize) -> SubscriptionMessage {
		SubscriptionMessage::new(self.0.method_name(), self.0.subscription_id(), result)
			.expect("Serialize infallible; qed")
	}
}
/// Helper for spawning non-blocking rpc subscription task.
///
/// The future is boxed and handed to the node's task executor under the
/// task name "substrate-rpc-subscription" in the "rpc" group.
pub fn spawn_subscription_task(
	executor: &SubscriptionTaskExecutor,
	fut: impl Future<Output = ()> + Send + 'static,
) {
	let task = fut.boxed();
	executor.spawn("substrate-rpc-subscription", Some("rpc"), task);
}
#[cfg(test)]
mod tests {
	use super::*;
	use futures::StreamExt;
	use jsonrpsee::{core::EmptyServerParams, RpcModule, Subscription};
	// Helper: register a subscription that pipes 16 items through a bounded
	// buffer of capacity 16, then return a client-side handle to it.
	async fn subscribe() -> Subscription {
		let mut module = RpcModule::new(());
		module
			.register_subscription("sub", "my_sub", "unsub", |_, pending, _, _| async move {
				let stream = futures::stream::iter([0; 16]);
				PendingSubscription::from(pending)
					.pipe_from_stream(stream, BoundedVecDeque::new(16))
					.await;
				Ok(())
			})
			.unwrap();
		module.subscribe("sub", EmptyServerParams::new(), 1).await.unwrap()
	}
	// All 16 produced items must be delivered to the subscriber.
	#[tokio::test]
	async fn pipe_from_stream_works() {
		let mut sub = subscribe().await;
		let mut rx = 0;
		while let Some(Ok(_)) = sub.next::<usize>().await {
			rx += 1;
		}
		assert_eq!(rx, 16);
	}
	// A stream of 32 items overflows a bounded buffer of capacity 16, so the
	// server must drop the subscription.
	#[tokio::test]
	async fn pipe_from_stream_with_bounded_vec() {
		let (tx, mut rx) = futures::channel::mpsc::unbounded::<()>();
		let mut module = RpcModule::new(tx);
		module
			.register_subscription("sub", "my_sub", "unsub", |_, pending, ctx, _| async move {
				let stream = futures::stream::iter([0; 32]);
				PendingSubscription::from(pending)
					.pipe_from_stream(stream, BoundedVecDeque::new(16))
					.await;
				// Signal that `pipe_from_stream` has returned.
				_ = ctx.unbounded_send(());
				Ok(())
			})
			.unwrap();
		let mut sub = module.subscribe("sub", EmptyServerParams::new(), 1).await.unwrap();
		// When the 17th item arrives the subscription is dropped
		_ = rx.next().await.unwrap();
		assert!(sub.next::<usize>().await.is_none());
	}
	// `pipe_from_stream` must return immediately when the stream is empty.
	#[tokio::test]
	async fn subscription_is_dropped_when_stream_is_empty() {
		let notify_rx = std::sync::Arc::new(tokio::sync::Notify::new());
		let notify_tx = notify_rx.clone();
		let mut module = RpcModule::new(notify_tx);
		module
			.register_subscription(
				"sub",
				"my_sub",
				"unsub",
				|_, pending, notify_tx, _| async move {
					// emulate empty stream for simplicity: otherwise we need some mechanism
					// to sync buffer and channel send operations
					let stream = futures::stream::empty::<()>();
					// this should exit immediately
					PendingSubscription::from(pending)
						.pipe_from_stream(stream, BoundedVecDeque::default())
						.await;
					// notify that the `pipe_from_stream` has returned
					notify_tx.notify_one();
					Ok(())
				},
			)
			.unwrap();
		module.subscribe("sub", EmptyServerParams::new(), 1).await.unwrap();
		// it should fire once `pipe_from_stream` returns
		notify_rx.notified().await;
	}
	// A `RingBuffer` must keep only the newest items when the client is slow.
	#[tokio::test]
	async fn subscription_replace_old_messages() {
		let mut module = RpcModule::new(());
		module
			.register_subscription("sub", "my_sub", "unsub", |_, pending, _, _| async move {
				// Send items 0..20 and ensure that only the last 3 are kept in the buffer.
				let stream = futures::stream::iter(0..20);
				PendingSubscription::from(pending)
					.pipe_from_stream(stream, RingBuffer::new(3))
					.await;
				Ok(())
			})
			.unwrap();
		let mut sub = module.subscribe("sub", EmptyServerParams::new(), 1).await.unwrap();
		// This is a hack to simulate a very slow client, so that
		// all older messages are replaced.
		tokio::time::sleep(std::time::Duration::from_secs(10)).await;
		let mut res = Vec::new();
		while let Some(Ok((v, _))) = sub.next::<usize>().await {
			res.push(v);
		}
		// There is no way to cancel pending send operations so
		// that's why 0 is included here.
		assert_eq!(res, vec![0, 17, 18, 19]);
	}
}