diff --git a/Cargo.lock b/Cargo.lock index 3359d45530..cf7817a6a5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1987,7 +1987,6 @@ dependencies = [ "regex", "scale-info", "sp-core", - "sp-runtime", "substrate-runner", "subxt", "subxt-codegen", @@ -2581,12 +2580,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "parity-wasm" -version = "0.45.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1ad0aff30c1da14b1254fcb2af73e1fa9a28670e584a626f53a369d0e157304" - [[package]] name = "parking" version = "2.1.0" @@ -3842,18 +3835,6 @@ dependencies = [ "twox-hash", ] -[[package]] -name = "sp-core-hashing-proc-macro" -version = "9.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "727d7c7d42e59a13eb1412758630569dd6c3640c4754d9a948e8d943f99bc932" -dependencies = [ - "proc-macro2", - "quote", - "sp-core-hashing", - "syn 2.0.28", -] - [[package]] name = "sp-debug-derive" version = "8.0.0" @@ -4074,36 +4055,6 @@ dependencies = [ "trie-root", ] -[[package]] -name = "sp-version" -version = "22.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a49e88372c9d923e95dc855f1e7bdbca6638574d59a8c3cb9f7e5ecea638481" -dependencies = [ - "impl-serde", - "parity-scale-codec", - "parity-wasm", - "scale-info", - "serde", - "sp-core-hashing-proc-macro", - "sp-runtime", - "sp-std", - "sp-version-proc-macro", - "thiserror", -] - -[[package]] -name = "sp-version-proc-macro" -version = "8.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39e1e11ab230fd0ede7aefeb246a1ef7e6af58634c490a2d911ce71aabc0718a" -dependencies = [ - "parity-scale-codec", - "proc-macro2", - "quote", - "syn 2.0.28", -] - [[package]] name = "sp-wasm-interface" version = "14.0.0" @@ -4235,6 +4186,7 @@ name = "subxt" version = "0.31.0" dependencies = [ "assert_matches", + "async-trait", "base58", "bitvec", "blake2", @@ -4259,7 +4211,6 @@ dependencies = [ "sp-core-hashing", "sp-keyring", 
"sp-runtime", - "sp-version", "subxt-lightclient", "subxt-macro", "subxt-metadata", diff --git a/Cargo.toml b/Cargo.toml index 519833fe8d..ccd3ca9b58 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,6 +36,7 @@ documentation = "https://docs.rs/subxt" homepage = "https://www.parity.io/" [workspace.dependencies] +async-trait = "0.1.72" assert_matches = "1.5.0" base58 = { version = "0.2.0" } bitvec = { version = "1", default-features = false } @@ -100,7 +101,6 @@ sp-core = { version = "21.0.0", default-features = false } sp-core-hashing = "9.0.0" sp-runtime = "24.0.0" sp-keyring = "24.0.0" -sp-version = "22.0.0" # Subxt workspace crates: subxt = { version = "0.31.0", path = "subxt", default-features = false } diff --git a/cli/src/commands/explore/calls.rs b/cli/src/commands/explore/calls.rs index ba18f2e44a..8eb993df6d 100644 --- a/cli/src/commands/explore/calls.rs +++ b/cli/src/commands/explore/calls.rs @@ -125,10 +125,9 @@ fn mocked_offline_client(metadata: Metadata) -> OfflineClient { H256::from_str("91b171bb158e2d3848fa23a9f1c25182fb8e20313b2c1eb49219da7a70ce90c3") .expect("Valid hash; qed"); - let runtime_version = subxt::rpc::types::RuntimeVersion { + let runtime_version = subxt::backend::RuntimeVersion { spec_version: 9370, transaction_version: 20, - other: Default::default(), }; OfflineClient::::new(genesis_hash, runtime_version, metadata) diff --git a/examples/wasm-example/src/routes/signing.rs b/examples/wasm-example/src/routes/signing.rs index 64b0be7e01..db6d059ada 100644 --- a/examples/wasm-example/src/routes/signing.rs +++ b/examples/wasm-example/src/routes/signing.rs @@ -162,9 +162,9 @@ impl Component for SigningExamplesComponent { // Apply the signature let signed_extrinsic = partial_signed.sign_with_address_and_signature(&account_id.into(), &multi_signature); - // do a dry run (to debug in the js console if the extrinsic would work) - let dry_res = signed_extrinsic.dry_run(None).await; - web_sys::console::log_1(&format!("Dry Run Result: {:?}", 
dry_res).into()); + // check the TX validity (to debug in the js console if the extrinsic would work) + let dry_res = signed_extrinsic.validate().await; + web_sys::console::log_1(&format!("Validation Result: {:?}", dry_res).into()); // return the signature and signed extrinsic Message::ReceivedSignature(multi_signature, signed_extrinsic) diff --git a/examples/wasm-example/src/services.rs b/examples/wasm-example/src/services.rs index 59e0ca2b1e..a6cd9b2c6d 100644 --- a/examples/wasm-example/src/services.rs +++ b/examples/wasm-example/src/services.rs @@ -48,8 +48,8 @@ pub(crate) async fn subscribe_to_finalized_blocks( writeln!(output, "Block #{}:", block.header().number).ok(); writeln!(output, " Hash: {}", block.hash()).ok(); writeln!(output, " Extrinsics:").ok(); - let body = block.body().await?; - for ext in body.extrinsics().iter() { + let extrinsics = block.extrinsics().await?; + for ext in extrinsics.iter() { let ext = ext?; let idx = ext.index(); let events = ext.events().await?; diff --git a/subxt/Cargo.toml b/subxt/Cargo.toml index 13b5a27183..ac64287cc5 100644 --- a/subxt/Cargo.toml +++ b/subxt/Cargo.toml @@ -29,25 +29,15 @@ native = [ # Enable this for web/wasm builds. # Exactly 1 of "web" and "native" is expected. -web = [ - "jsonrpsee?/async-wasm-client", - "jsonrpsee?/client-web-transport", - "getrandom/js", - "subxt-lightclient?/web" -] +web = ["jsonrpsee?/async-wasm-client", "jsonrpsee?/client-web-transport", "getrandom/js", "subxt-lightclient?/web"] # Enable this to use jsonrpsee (allowing for example `OnlineClient::from_url`). -jsonrpsee = [ - "dep:jsonrpsee" -] +jsonrpsee = ["dep:jsonrpsee"] # Enable this to pull in extra Substrate dependencies which make it possible to # use the `sp_core::crypto::Pair` Signer implementation, as well as adding some # `From` impls for types like `AccountId32`. Cannot be used with "web". 
-substrate-compat = [ - "sp-core", - "sp-runtime" -] +substrate-compat = ["sp-core", "sp-runtime"] # Enable this to fetch and utilize the latest unstable metadata from a node. # The unstable metadata is subject to breaking changes and the subxt might @@ -57,12 +47,10 @@ unstable-metadata = [] # Activate this to expose the Light Client functionality. # Note that this feature is experimental and things may break or not work as expected. -unstable-light-client = [ - "subxt-lightclient", - "tokio-stream" -] +unstable-light-client = ["subxt-lightclient", "tokio-stream"] [dependencies] +async-trait = { workspace = true } codec = { package = "parity-scale-codec", workspace = true, features = ["derive"] } scale-info = { workspace = true } scale-value = { workspace = true } @@ -112,9 +100,8 @@ codec = { workspace = true, features = ["derive", "bit-vec"] } scale-info = { workspace = true, features = ["bit-vec"] } tokio = { workspace = true, features = ["macros", "time", "rt-multi-thread"] } sp-core = { workspace = true } -sp-runtime = { workspace = true } sp-keyring = { workspace = true } -sp-version = { workspace = true } +sp-runtime = { workspace = true } assert_matches = { workspace = true } subxt-signer = { path = "../signer", features = ["subxt"] } # Tracing subscriber is useful for light-client examples to ensure that diff --git a/subxt/examples/blocks_subscribing.rs b/subxt/examples/blocks_subscribing.rs index 136af804c1..4f06221484 100644 --- a/subxt/examples/blocks_subscribing.rs +++ b/subxt/examples/blocks_subscribing.rs @@ -1,4 +1,3 @@ -use futures::StreamExt; use subxt::{OnlineClient, PolkadotConfig}; #[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] @@ -24,8 +23,8 @@ async fn main() -> Result<(), Box> { println!(" Extrinsics:"); // Log each of the extrinsic with it's associated events: - let body = block.body().await?; - for ext in body.extrinsics().iter() { + let extrinsics = block.extrinsics().await?; + for ext in 
extrinsics.iter() { let ext = ext?; let idx = ext.index(); let events = ext.events().await?; diff --git a/subxt/examples/setup_client_custom_rpc.rs b/subxt/examples/setup_client_custom_rpc.rs index ed04f1f4fa..0745ec6123 100644 --- a/subxt/examples/setup_client_custom_rpc.rs +++ b/subxt/examples/setup_client_custom_rpc.rs @@ -4,7 +4,7 @@ use std::{ sync::{Arc, Mutex}, }; use subxt::{ - rpc::{RawValue, RpcClientT, RpcFuture, RpcSubscription}, + backend::rpc::{RawRpcFuture, RawRpcSubscription, RawValue, RpcClient, RpcClientT}, OnlineClient, PolkadotConfig, }; @@ -22,7 +22,7 @@ impl RpcClientT for MyLoggingClient { &'a self, method: &'a str, params: Option>, - ) -> RpcFuture<'a, Box> { + ) -> RawRpcFuture<'a, Box> { writeln!( self.log.lock().unwrap(), "{method}({})", @@ -41,7 +41,7 @@ impl RpcClientT for MyLoggingClient { sub: &'a str, params: Option>, unsub: &'a str, - ) -> RpcFuture<'a, RpcSubscription> { + ) -> RawRpcFuture<'a, RawRpcSubscription> { writeln!( self.log.lock().unwrap(), "{sub}({}) (unsub: {unsub})", @@ -56,7 +56,10 @@ impl RpcClientT for MyLoggingClient { let stream = futures::stream::once(async move { Ok(res) }); let stream: Pin + Send>> = Box::pin(stream); // This subscription does not provide an ID. - Box::pin(std::future::ready(Ok(RpcSubscription { stream, id: None }))) + Box::pin(std::future::ready(Ok(RawRpcSubscription { + stream, + id: None, + }))) } } @@ -64,14 +67,17 @@ impl RpcClientT for MyLoggingClient { async fn main() -> Result<(), Box> { // Instantiate our replacement RPC client. let log = Arc::default(); - let rpc_client = MyLoggingClient { - log: Arc::clone(&log), + let rpc_client = { + let inner = MyLoggingClient { + log: Arc::clone(&log), + }; + RpcClient::new(inner) }; // Pass this into our OnlineClient to instantiate it. This will lead to some // RPC calls being made to fetch chain details/metadata, which will immediately // fail.. 
- let _ = OnlineClient::::from_rpc_client(Arc::new(rpc_client)).await; + let _ = OnlineClient::::from_rpc_client(rpc_client).await; // But, we can see that the calls were made via our custom RPC client: println!("Log of calls made:\n\n{}", log.lock().unwrap().as_str()); diff --git a/subxt/examples/setup_client_offline.rs b/subxt/examples/setup_client_offline.rs index 10c606e3ce..f9b8bc53cf 100644 --- a/subxt/examples/setup_client_offline.rs +++ b/subxt/examples/setup_client_offline.rs @@ -15,10 +15,9 @@ async fn main() -> Result<(), Box> { }; // 2. A runtime version (system_version constant on a Substrate node has these): - let runtime_version = subxt::rpc::types::RuntimeVersion { + let runtime_version = subxt::backend::RuntimeVersion { spec_version: 9370, transaction_version: 20, - other: Default::default(), }; // 3. Metadata (I'll load it from the downloaded metadata, but you can use diff --git a/subxt/examples/storage_iterating.rs b/subxt/examples/storage_iterating.rs index 583efdaed9..99a4719c9b 100644 --- a/subxt/examples/storage_iterating.rs +++ b/subxt/examples/storage_iterating.rs @@ -13,14 +13,9 @@ async fn main() -> Result<(), Box> { // Get back an iterator of results (here, we are fetching 10 items at // a time from the node, but we always iterate over one at a time). - let mut results = api - .storage() - .at_latest() - .await? - .iter(storage_query, 10) - .await?; + let mut results = api.storage().at_latest().await?.iter(storage_query).await?; - while let Some((key, value)) = results.next().await? 
{ + while let Some(Ok((key, value))) = results.next().await { println!("Key: 0x{}", hex::encode(&key)); println!("Value: {:?}", value); } diff --git a/subxt/examples/storage_iterating_dynamic.rs b/subxt/examples/storage_iterating_dynamic.rs index d9b4496030..acc334d7a6 100644 --- a/subxt/examples/storage_iterating_dynamic.rs +++ b/subxt/examples/storage_iterating_dynamic.rs @@ -11,14 +11,9 @@ async fn main() -> Result<(), Box> { let storage_query = subxt::dynamic::storage("System", "Account", keys); // Use that query to return an iterator over the results. - let mut results = api - .storage() - .at_latest() - .await? - .iter(storage_query, 10) - .await?; + let mut results = api.storage().at_latest().await?.iter(storage_query).await?; - while let Some((key, value)) = results.next().await? { + while let Some(Ok((key, value))) = results.next().await { println!("Key: 0x{}", hex::encode(&key)); println!("Value: {:?}", value.to_value()?); } diff --git a/subxt/examples/storage_iterating_partial.rs b/subxt/examples/storage_iterating_partial.rs index fe8c6ae26e..daf763e2ec 100644 --- a/subxt/examples/storage_iterating_partial.rs +++ b/subxt/examples/storage_iterating_partial.rs @@ -1,15 +1,13 @@ -use subxt::{OnlineClient, PolkadotConfig}; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale")] -pub mod polkadot {} - -use subxt::utils::AccountId32; -use subxt_signer::sr25519::{dev, Keypair}; - use polkadot::multisig::events::NewMultisig; use polkadot::runtime_types::{ frame_system::pallet::Call, polkadot_runtime::RuntimeCall, sp_weights::weight_v2::Weight, }; +use subxt::utils::AccountId32; +use subxt::{OnlineClient, PolkadotConfig}; +use subxt_signer::sr25519::{dev, Keypair}; + +#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale")] +pub mod polkadot {} #[tokio::main] async fn main() -> Result<(), Box> { @@ -24,6 +22,7 @@ async fn main() -> Result<(), Box> { let new_multisig_1 = 
submit_remark_as_multi(&alice_signer, &bob, b"Hello", &api).await?; let new_multisig_2 = submit_remark_as_multi(&alice_signer, &bob, b"Hi", &api).await?; let new_multisig_3 = submit_remark_as_multi(&alice_signer, &charlie, b"Hello", &api).await?; + // Note: the NewMultisig event contains the multisig address we need to use for the storage queries: assert_eq!(new_multisig_1.multisig, new_multisig_2.multisig); assert_ne!(new_multisig_1.multisig, new_multisig_3.multisig); @@ -35,16 +34,10 @@ async fn main() -> Result<(), Box> { .multisig() .multisigs_iter1(alice_bob_account_id); - // Get back an iterator of results (here, we are fetching 10 items at - // a time from the node, but we always iterate over one at a time). - let mut results = api - .storage() - .at_latest() - .await? - .iter(storage_query, 10) - .await?; + // Get back an iterator of results. + let mut results = api.storage().at_latest().await?.iter(storage_query).await?; - while let Some((key, value)) = results.next().await? { + while let Some(Ok((key, value))) = results.next().await { println!("Key: 0x{}", hex::encode(&key)); println!("Value: {:?}", value); } diff --git a/subxt/examples/tx_status_stream.rs b/subxt/examples/tx_status_stream.rs index 66466174d8..47fd58321e 100644 --- a/subxt/examples/tx_status_stream.rs +++ b/subxt/examples/tx_status_stream.rs @@ -1,4 +1,3 @@ -use futures::StreamExt; use subxt::{tx::TxStatus, OnlineClient, PolkadotConfig}; use subxt_signer::sr25519::dev; @@ -26,7 +25,7 @@ async fn main() -> Result<(), Box> { while let Some(status) = balance_transfer_progress.next().await { match status? { // It's finalized in a block! 
- TxStatus::Finalized(in_block) => { + TxStatus::InFinalizedBlock(in_block) => { println!( "Transaction {:?} is finalized in block {:?}", in_block.extrinsic_hash(), diff --git a/subxt/src/backend/legacy/mod.rs b/subxt/src/backend/legacy/mod.rs new file mode 100644 index 0000000000..7ab449273c --- /dev/null +++ b/subxt/src/backend/legacy/mod.rs @@ -0,0 +1,469 @@ +// Copyright 2019-2023 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This module exposes a legacy backend implementation, which relies +//! on the legacy RPC API methods. + +pub mod rpc_methods; + +use self::rpc_methods::TransactionStatus as RpcTransactionStatus; +use crate::backend::{ + rpc::RpcClient, Backend, BlockRef, RuntimeVersion, StorageResponse, StreamOf, StreamOfResults, + TransactionStatus, +}; +use crate::{config::Header, Config, Error}; +use async_trait::async_trait; +use futures::{future, future::Either, stream, Future, FutureExt, Stream, StreamExt}; +use std::collections::VecDeque; +use std::pin::Pin; +use std::task::{Context, Poll}; + +pub use rpc_methods::LegacyRpcMethods; + +/// The legacy backend. +#[derive(Debug, Clone)] +pub struct LegacyBackend { + methods: LegacyRpcMethods, +} + +impl LegacyBackend { + /// Instantiate a new backend which uses the legacy API methods. + pub fn new(client: RpcClient) -> Self { + Self { + methods: LegacyRpcMethods::new(client), + } + } +} + +impl super::sealed::Sealed for LegacyBackend {} + +#[async_trait] +impl Backend for LegacyBackend { + async fn storage_fetch_values( + &self, + keys: Vec>, + at: T::Hash, + ) -> Result, Error> { + let methods = self.methods.clone(); + + // For each key, return it + a future to get the result. 
+ let iter = keys.into_iter().map(move |key| { + let methods = methods.clone(); + async move { + let res = methods.state_get_storage(&key, Some(at)).await?; + Ok(res.map(|value| StorageResponse { key, value })) + } + }); + + let s = stream::iter(iter) + // Resolve the future + .then(|fut| fut) + // Filter any Options out (ie if we didn't find a value at some key we return nothing for it). + .filter_map(|r| future::ready(r.transpose())); + + Ok(StreamOf(Box::pin(s))) + } + + async fn storage_fetch_descendant_keys( + &self, + key: Vec, + starting_at: Option>, + at: T::Hash, + ) -> Result>, Error> { + Ok(StreamOf(Box::pin(StorageFetchDescendantKeysStream { + at, + key, + methods: self.methods.clone(), + done: Default::default(), + keys: Default::default(), + keys_fut: Default::default(), + pagination_start_key: starting_at, + }))) + } + + async fn storage_fetch_descendant_values( + &self, + key: Vec, + at: T::Hash, + ) -> Result, Error> { + let keys_stream = StorageFetchDescendantKeysStream { + at, + key, + methods: self.methods.clone(), + done: Default::default(), + keys: Default::default(), + keys_fut: Default::default(), + pagination_start_key: Default::default(), + }; + + Ok(StreamOf(Box::pin(StorageFetchDescendantValuesStream { + keys: keys_stream, + next_key: None, + value_fut: Default::default(), + }))) + } + + async fn genesis_hash(&self) -> Result { + self.methods.genesis_hash().await + } + + async fn block_header(&self, at: T::Hash) -> Result, Error> { + self.methods.chain_get_header(Some(at)).await + } + + async fn block_body(&self, at: T::Hash) -> Result>>, Error> { + let Some(details) = self.methods.chain_get_block(Some(at)).await? 
else { + return Ok(None) + }; + Ok(Some( + details.block.extrinsics.into_iter().map(|b| b.0).collect(), + )) + } + + async fn latest_finalized_block_ref(&self) -> Result, Error> { + let hash = self.methods.chain_get_finalized_head().await?; + Ok(BlockRef::from_hash(hash)) + } + + async fn latest_best_block_ref(&self) -> Result, Error> { + let hash = self + .methods + .chain_get_block_hash(None) + .await? + .ok_or_else(|| Error::Other("Latest best block doesn't exist".into()))?; + Ok(BlockRef::from_hash(hash)) + } + + async fn current_runtime_version(&self) -> Result { + let details = self.methods.state_get_runtime_version(None).await?; + Ok(RuntimeVersion { + spec_version: details.spec_version, + transaction_version: details.transaction_version, + }) + } + + async fn stream_runtime_version(&self) -> Result, Error> { + let sub = self.methods.state_subscribe_runtime_version().await?; + let sub = sub.map(|r| { + r.map(|v| RuntimeVersion { + spec_version: v.spec_version, + transaction_version: v.transaction_version, + }) + }); + Ok(StreamOf(Box::pin(sub))) + } + + async fn stream_all_block_headers( + &self, + ) -> Result)>, Error> { + let sub = self.methods.chain_subscribe_all_heads().await?; + let sub = sub.map(|r| { + r.map(|h| { + let hash = h.hash(); + (h, BlockRef::from_hash(hash)) + }) + }); + Ok(StreamOf(Box::pin(sub))) + } + + async fn stream_best_block_headers( + &self, + ) -> Result)>, Error> { + let sub = self.methods.chain_subscribe_new_heads().await?; + let sub = sub.map(|r| { + r.map(|h| { + let hash = h.hash(); + (h, BlockRef::from_hash(hash)) + }) + }); + Ok(StreamOf(Box::pin(sub))) + } + + async fn stream_finalized_block_headers( + &self, + ) -> Result)>, Error> { + let sub: super::rpc::RpcSubscription<::Header> = + self.methods.chain_subscribe_finalized_heads().await?; + + // Get the last finalized block immediately so that the stream will emit every finalized block after this. 
+ let last_finalized_block_ref = self.latest_finalized_block_ref().await?; + let last_finalized_block_num = self + .block_header(last_finalized_block_ref.hash()) + .await? + .map(|h| h.number().into()); + + // Fill in any missing blocks, because the backend may not emit every finalized block; just the latest ones which + // are finalized each time. + let sub = subscribe_to_block_headers_filling_in_gaps( + self.methods.clone(), + sub, + last_finalized_block_num, + ); + let sub = sub.map(|r| { + r.map(|h| { + let hash = h.hash(); + (h, BlockRef::from_hash(hash)) + }) + }); + Ok(StreamOf(Box::pin(sub))) + } + + async fn submit_transaction( + &self, + extrinsic: &[u8], + ) -> Result>, Error> { + let sub = self + .methods + .author_submit_and_watch_extrinsic(extrinsic) + .await?; + let sub = sub.filter_map(|r| { + let mapped = r + .map(|tx| { + match tx { + // We ignore these because they don't map nicely to the new API. They don't signal "end states" so this should be fine. + RpcTransactionStatus::Future => None, + RpcTransactionStatus::Retracted(_) => None, + // These roughly map across: + RpcTransactionStatus::Ready => Some(TransactionStatus::Validated), + RpcTransactionStatus::Broadcast(peers) => { + Some(TransactionStatus::Broadcasted { + num_peers: peers.len() as u32, + }) + } + RpcTransactionStatus::InBlock(hash) => { + Some(TransactionStatus::InBestBlock { hash }) + } + // These 5 mean that the stream will very likely end: + RpcTransactionStatus::FinalityTimeout(_) => { + Some(TransactionStatus::Invalid { + message: "Finality timeout".into(), + }) + } + RpcTransactionStatus::Finalized(hash) => { + Some(TransactionStatus::InFinalizedBlock { hash }) + } + RpcTransactionStatus::Usurped(_) => Some(TransactionStatus::Invalid { + message: "Transaction was usurped by another with the same nonce" + .into(), + }), + RpcTransactionStatus::Dropped => Some(TransactionStatus::Dropped { + message: "Transaction was dropped".into(), + }), + RpcTransactionStatus::Invalid => 
Some(TransactionStatus::Invalid { + message: + "Transaction is invalid (eg because of a bad nonce, signature etc)" + .into(), + }), + } + }) + .transpose(); + + future::ready(mapped) + }); + Ok(StreamOf(Box::pin(sub))) + } + + async fn call( + &self, + method: &str, + call_parameters: Option<&[u8]>, + at: T::Hash, + ) -> Result, Error> { + self.methods + .state_call(method, call_parameters, Some(at)) + .await + } +} + +/// Note: This is exposed for testing but is not considered stable and may change +/// without notice in a patch release. +#[doc(hidden)] +pub fn subscribe_to_block_headers_filling_in_gaps( + methods: LegacyRpcMethods, + sub: S, + mut last_block_num: Option, +) -> impl Stream> + Send +where + T: Config, + S: Stream> + Send, + E: Into + Send + 'static, +{ + sub.flat_map(move |s| { + // Get the header, or return a stream containing just the error. + let header = match s { + Ok(header) => header, + Err(e) => return Either::Left(stream::once(async { Err(e.into()) })), + }; + + // We want all previous details up to, but not including this current block num. + let end_block_num = header.number().into(); + + // This is one after the last block we returned details for last time. + let start_block_num = last_block_num.map(|n| n + 1).unwrap_or(end_block_num); + + // Iterate over all of the previous blocks we need headers for, ignoring the current block + // (which we already have the header info for): + let methods = methods.clone(); + let previous_headers = stream::iter(start_block_num..end_block_num) + .then(move |n| { + let methods = methods.clone(); + async move { + let hash = methods.chain_get_block_hash(Some(n.into())).await?; + let header = methods.chain_get_header(hash).await?; + Ok::<_, Error>(header) + } + }) + .filter_map(|h| async { h.transpose() }); + + // On the next iteration, we'll get details starting just after this end block. + last_block_num = Some(end_block_num); + + // Return a combination of any previous headers plus the new header. 
+ Either::Right(previous_headers.chain(stream::once(async { Ok(header) }))) + }) +} + +/// This provides a stream of values given some prefix `key`. It +/// internally manages pagination and such. +pub struct StorageFetchDescendantKeysStream { + methods: LegacyRpcMethods, + key: Vec, + at: T::Hash, + // What key do we start paginating from? None = from the beginning. + pagination_start_key: Option>, + // Keys, future and cached: + keys_fut: Option>, Error>> + Send + 'static>>>, + keys: VecDeque>, + // Set to true when we're done: + done: bool, +} + +impl std::marker::Unpin for StorageFetchDescendantKeysStream {} + +// How many storage keys to ask for each time. +const STORAGE_FETCH_PAGE_SIZE: u32 = 32; + +impl Stream for StorageFetchDescendantKeysStream { + type Item = Result, Error>; + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + loop { + let mut this = self.as_mut(); + + // We're already done. + if this.done { + return Poll::Ready(None); + } + + // We have some keys to hand back already, so do that. + if let Some(key) = this.keys.pop_front() { + return Poll::Ready(Some(Ok(key))); + } + + // Else, we don't have any keys, but we have a fut to get more so poll it. + if let Some(mut keys_fut) = this.keys_fut.take() { + let Poll::Ready(keys) = keys_fut.poll_unpin(cx) else { + this.keys_fut = Some(keys_fut); + return Poll::Pending + }; + + match keys { + Ok(keys) => { + if keys.is_empty() { + // No keys left; we're done! + this.done = true; + return Poll::Ready(None); + } + // The last key is where we want to paginate from next time. + this.pagination_start_key = keys.last().cloned(); + // Got new keys; loop around to start returning them. + this.keys = keys.into_iter().collect(); + continue; + } + Err(e) => { + // Error getting keys? Return it. + return Poll::Ready(Some(Err(e))); + } + } + } + + // Else, we don't have a fut to get keys yet so start one going. 
+ let methods = this.methods.clone(); + let key = this.key.clone(); + let at = this.at; + let pagination_start_key = this.pagination_start_key.take(); + let keys_fut = async move { + methods + .state_get_keys_paged( + &key, + STORAGE_FETCH_PAGE_SIZE, + pagination_start_key.as_deref(), + Some(at), + ) + .await + }; + this.keys_fut = Some(Box::pin(keys_fut)); + } + } +} + +/// This provides a stream of values given some stream of keys. +pub struct StorageFetchDescendantValuesStream { + // Stream of keys. + keys: StorageFetchDescendantKeysStream, + next_key: Option>, + // Then we track the next value: + value_fut: + Option>, Error>> + Send + 'static>>>, +} + +impl Stream for StorageFetchDescendantValuesStream { + type Item = Result; + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut this = self.as_mut(); + loop { + // If we're waiting on the next value then poll that future: + if let Some(mut value_fut) = this.value_fut.take() { + match value_fut.poll_unpin(cx) { + Poll::Ready(Ok(Some(value))) => { + let key = this.next_key.take().expect("key should exist"); + return Poll::Ready(Some(Ok(StorageResponse { key, value }))); + } + Poll::Ready(Ok(None)) => { + // No value back for some key? Skip. + continue; + } + Poll::Ready(Err(e)) => return Poll::Ready(Some(Err(e))), + Poll::Pending => { + this.value_fut = Some(value_fut); + return Poll::Pending; + } + } + } + + // Else, if we have the next key then let's start waiting on the next value. + if let Some(key) = &this.next_key { + let key = key.clone(); + let methods = this.keys.methods.clone(); + let at = this.keys.at; + let fut = async move { methods.state_get_storage(&key, Some(at)).await }; + + this.value_fut = Some(Box::pin(fut)); + continue; + } + + // Else, poll the keys stream to get the next key. 
+ match this.keys.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(key))) => { + this.next_key = Some(key); + continue; + } + Poll::Ready(Some(Err(e))) => return Poll::Ready(Some(Err(e))), + Poll::Ready(None) => return Poll::Ready(None), + Poll::Pending => { + return Poll::Pending; + } + } + } + } +} diff --git a/subxt/src/backend/legacy/rpc_methods.rs b/subxt/src/backend/legacy/rpc_methods.rs new file mode 100644 index 0000000000..b3a46e122f --- /dev/null +++ b/subxt/src/backend/legacy/rpc_methods.rs @@ -0,0 +1,532 @@ +// Copyright 2019-2023 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! An interface to call the raw legacy RPC methods. + +use crate::backend::rpc::{rpc_params, RpcClient, RpcSubscription}; +use crate::metadata::Metadata; +use crate::{Config, Error}; +use codec::Decode; +use primitive_types::U256; +use serde::{Deserialize, Serialize}; + +/// An interface to call the legacy RPC methods. This interface is instantiated with +/// some `T: Config` trait which determines some of the types that the RPC methods will +/// take or hand back. +pub struct LegacyRpcMethods { + client: RpcClient, + _marker: std::marker::PhantomData, +} + +impl Clone for LegacyRpcMethods { + fn clone(&self) -> Self { + Self { + client: self.client.clone(), + _marker: self._marker, + } + } +} + +impl std::fmt::Debug for LegacyRpcMethods { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("LegacyRpcMethods") + .field("client", &self.client) + .field("_marker", &self._marker) + .finish() + } +} + +impl LegacyRpcMethods { + /// Instantiate the legacy RPC method interface. 
+ pub fn new(client: RpcClient) -> Self { + LegacyRpcMethods { + client, + _marker: std::marker::PhantomData, + } + } + + /// Fetch the raw bytes for a given storage key + pub async fn state_get_storage( + &self, + key: &[u8], + hash: Option, + ) -> Result, Error> { + let params = rpc_params![to_hex(key), hash]; + let data: Option = self.client.request("state_getStorage", params).await?; + Ok(data.map(|b| b.0)) + } + + /// Returns the keys with prefix with pagination support. + /// Up to `count` keys will be returned. + /// If `start_key` is passed, return next keys in storage in lexicographic order. + pub async fn state_get_keys_paged( + &self, + key: &[u8], + count: u32, + start_key: Option<&[u8]>, + at: Option, + ) -> Result, Error> { + let start_key = start_key.map(to_hex); + let params = rpc_params![to_hex(key), count, start_key, at]; + let data: Vec = self.client.request("state_getKeysPaged", params).await?; + Ok(data.into_iter().map(|b| b.0).collect()) + } + + /// Fetch the genesis hash + pub async fn genesis_hash(&self) -> Result { + let block_zero = 0u32; + let params = rpc_params![block_zero]; + let genesis_hash: Option = + self.client.request("chain_getBlockHash", params).await?; + genesis_hash.ok_or_else(|| "Genesis hash not found".into()) + } + + /// Fetch the metadata via the legacy `state_getMetadata` RPC method. 
+ pub async fn state_get_metadata(&self, at: Option) -> Result { + let bytes: Bytes = self + .client + .request("state_getMetadata", rpc_params![at]) + .await?; + let metadata = Metadata::decode(&mut &bytes[..])?; + Ok(metadata) + } + + /// Fetch system health + pub async fn system_health(&self) -> Result { + self.client.request("system_health", rpc_params![]).await + } + + /// Fetch system chain + pub async fn system_chain(&self) -> Result { + self.client.request("system_chain", rpc_params![]).await + } + + /// Fetch system name + pub async fn system_name(&self) -> Result { + self.client.request("system_name", rpc_params![]).await + } + + /// Fetch system version + pub async fn system_version(&self) -> Result { + self.client.request("system_version", rpc_params![]).await + } + + /// Fetch system properties + pub async fn system_properties(&self) -> Result { + self.client + .request("system_properties", rpc_params![]) + .await + } + + /// Get a header + pub async fn chain_get_header( + &self, + hash: Option, + ) -> Result, Error> { + let params = rpc_params![hash]; + let header = self.client.request("chain_getHeader", params).await?; + Ok(header) + } + + /// Get a block hash, returns hash of latest _best_ block by default. 
+ pub async fn chain_get_block_hash( + &self, + block_number: Option, + ) -> Result, Error> { + let params = rpc_params![block_number]; + let block_hash = self.client.request("chain_getBlockHash", params).await?; + Ok(block_hash) + } + + /// Get a block hash of the latest finalized block + pub async fn chain_get_finalized_head(&self) -> Result { + let hash = self + .client + .request("chain_getFinalizedHead", rpc_params![]) + .await?; + Ok(hash) + } + + /// Get a Block + pub async fn chain_get_block( + &self, + hash: Option, + ) -> Result>, Error> { + let params = rpc_params![hash]; + let block = self.client.request("chain_getBlock", params).await?; + Ok(block) + } + + /// Fetch the runtime version + pub async fn state_get_runtime_version( + &self, + at: Option, + ) -> Result { + let params = rpc_params![at]; + let version = self + .client + .request("state_getRuntimeVersion", params) + .await?; + Ok(version) + } + + /// Subscribe to all new best block headers. + pub async fn chain_subscribe_new_heads(&self) -> Result, Error> { + let subscription = self + .client + .subscribe( + // Despite the name, this returns a stream of all new blocks + // imported by the node that happen to be added to the current best chain + // (ie all best blocks). + "chain_subscribeNewHeads", + rpc_params![], + "chain_unsubscribeNewHeads", + ) + .await?; + + Ok(subscription) + } + + /// Subscribe to all new block headers. + pub async fn chain_subscribe_all_heads(&self) -> Result, Error> { + let subscription = self + .client + .subscribe( + // Despite the name, this returns a stream of all new blocks + // imported by the node that happen to be added to the current best chain + // (ie all best blocks). + "chain_subscribeAllHeads", + rpc_params![], + "chain_unsubscribeAllHeads", + ) + .await?; + + Ok(subscription) + } + + /// Subscribe to finalized block headers. 
+ /// + /// Note: this may not produce _every_ block in the finalized chain; + /// sometimes multiple blocks are finalized at once, and in this case only the + /// latest one is returned. the higher level APIs that use this "fill in" the + /// gaps for us. + pub async fn chain_subscribe_finalized_heads( + &self, + ) -> Result, Error> { + let subscription = self + .client + .subscribe( + "chain_subscribeFinalizedHeads", + rpc_params![], + "chain_unsubscribeFinalizedHeads", + ) + .await?; + Ok(subscription) + } + + /// Subscribe to runtime version updates that produce changes in the metadata. + /// The first item emitted by the stream is the current runtime version. + pub async fn state_subscribe_runtime_version( + &self, + ) -> Result, Error> { + let subscription = self + .client + .subscribe( + "state_subscribeRuntimeVersion", + rpc_params![], + "state_unsubscribeRuntimeVersion", + ) + .await?; + Ok(subscription) + } + + /// Create and submit an extrinsic and return corresponding Hash if successful + pub async fn author_submit_extrinsic(&self, extrinsic: &[u8]) -> Result { + let params = rpc_params![to_hex(extrinsic)]; + let xt_hash = self + .client + .request("author_submitExtrinsic", params) + .await?; + Ok(xt_hash) + } + + /// Create and submit an extrinsic and return a subscription to the events triggered. + pub async fn author_submit_and_watch_extrinsic( + &self, + extrinsic: &[u8], + ) -> Result>, Error> { + let params = rpc_params![to_hex(extrinsic)]; + let subscription = self + .client + .subscribe( + "author_submitAndWatchExtrinsic", + params, + "author_unwatchExtrinsic", + ) + .await?; + Ok(subscription) + } + + /// Execute a runtime API call via `state_call` RPC method. 
+ pub async fn state_call( + &self, + function: &str, + call_parameters: Option<&[u8]>, + at: Option, + ) -> Result, Error> { + let call_parameters = call_parameters.unwrap_or_default(); + let bytes: Bytes = self + .client + .request( + "state_call", + rpc_params![function, to_hex(call_parameters), at], + ) + .await?; + Ok(bytes.0) + } + + /// Submits the extrinsic to the dry_run RPC, to test if it would succeed. + /// + /// Returns a [`DryRunResult`], which is the result of performing the dry run. + pub async fn dry_run( + &self, + encoded_signed: &[u8], + at: Option, + ) -> Result { + let params = rpc_params![to_hex(encoded_signed), at]; + let result_bytes: Bytes = self.client.request("system_dryRun", params).await?; + Ok(DryRunResultBytes(result_bytes.0)) + } +} + +/// Storage key. +pub type StorageKey = Vec; + +/// Storage data. +pub type StorageData = Vec; + +/// Health struct returned by the RPC +#[derive(Deserialize, Clone, Debug)] +#[serde(rename_all = "camelCase")] +pub struct SystemHealth { + /// Number of connected peers + pub peers: usize, + /// Is the node syncing + pub is_syncing: bool, + /// Should this node have any peers + /// + /// Might be false for local chains or when running without discovery. + pub should_have_peers: bool, +} + +/// System properties; an arbitrary JSON object. +pub type SystemProperties = serde_json::Map; + +/// A block number +pub type BlockNumber = NumberOrHex; + +/// The response from `chain_getBlock` +#[derive(Debug, Deserialize)] +#[serde(bound = "T: Config")] +pub struct BlockDetails { + /// The block itself. + pub block: Block, + /// Block justification. + pub justifications: Option>, +} + +/// Block details in the [`BlockDetails`]. +#[derive(Debug, Deserialize)] +pub struct Block { + /// The block header. + pub header: T::Header, + /// The accompanying extrinsics. + pub extrinsics: Vec, +} + +/// An abstraction over justification for a block's validity under a consensus algorithm. 
+pub type BlockJustification = (ConsensusEngineId, EncodedJustification); +/// Consensus engine unique ID. +pub type ConsensusEngineId = [u8; 4]; +/// The encoded justification specific to a consensus engine. +pub type EncodedJustification = Vec; + +/// This contains the runtime version information necessary to make transactions, as obtained from +/// the RPC call `state_getRuntimeVersion`, +#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RuntimeVersion { + /// Version of the runtime specification. A full-node will not attempt to use its native + /// runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`, + /// `spec_version` and `authoring_version` are the same between Wasm and native. + pub spec_version: u32, + + /// All existing dispatches are fully compatible when this number doesn't change. If this + /// number changes, then `spec_version` must change, also. + /// + /// This number must change when an existing dispatchable (module ID, dispatch ID) is changed, + /// either through an alteration in its user-level semantics, a parameter + /// added/removed/changed, a dispatchable being removed, a module being removed, or a + /// dispatchable/module changing its index. + /// + /// It need *not* change when a new module is added or when a dispatchable is added. + pub transaction_version: u32, + + /// Fields unnecessary to Subxt are written out to this map. + #[serde(flatten)] + pub other: std::collections::HashMap, +} + +/// Possible transaction status events. +/// +/// # Note +/// +/// This is copied from `sp-transaction-pool` to avoid a dependency on that crate. Therefore it +/// must be kept compatible with that type from the target substrate version. +#[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum TransactionStatus { + /// Transaction is part of the future queue. + Future, + /// Transaction is part of the ready queue. 
+ Ready, + /// The transaction has been broadcast to the given peers. + Broadcast(Vec), + /// Transaction has been included in block with given hash. + InBlock(Hash), + /// The block this transaction was included in has been retracted. + Retracted(Hash), + /// Maximum number of finality watchers has been reached, + /// old watchers are being removed. + FinalityTimeout(Hash), + /// Transaction has been finalized by a finality-gadget, e.g GRANDPA + Finalized(Hash), + /// Transaction has been replaced in the pool, by another transaction + /// that provides the same tags. (e.g. same (sender, nonce)). + Usurped(Hash), + /// Transaction has been dropped from the pool because of the limit. + Dropped, + /// Transaction is no longer valid in the current state. + Invalid, +} + +/// Hex-serialized shim for `Vec`. +#[derive(PartialEq, Eq, Clone, Serialize, Deserialize, Hash, PartialOrd, Ord, Debug)] +pub struct Bytes(#[serde(with = "impl_serde::serialize")] pub Vec); +impl std::ops::Deref for Bytes { + type Target = [u8]; + fn deref(&self) -> &[u8] { + &self.0[..] + } +} +impl From> for Bytes { + fn from(s: Vec) -> Self { + Bytes(s) + } +} + +/// The decoded result returned from calling `system_dryRun` on some extrinsic. +#[derive(Debug, PartialEq, Eq)] +pub enum DryRunResult { + /// The transaction could be included in the block and executed. + Success, + /// The transaction could be included in the block, but the call failed to dispatch. + DispatchError(crate::error::DispatchError), + /// The transaction could not be included in the block. + TransactionValidityError, +} + +/// The bytes representing an error dry running an extrinsic. call [`DryRunResultBytes::into_dry_run_result`] +/// to attempt to decode this into something more meaningful. +pub struct DryRunResultBytes(pub Vec); + +impl DryRunResultBytes { + /// Attempt to decode the error bytes into a [`DryRunResult`] using the provided [`Metadata`]. 
+ pub fn into_dry_run_result( + self, + metadata: &crate::metadata::Metadata, + ) -> Result { + // dryRun returns an ApplyExtrinsicResult, which is basically a + // `Result, TransactionValidityError>`. + let bytes = self.0; + if bytes[0] == 0 && bytes[1] == 0 { + // Ok(Ok(())); transaction is valid and executed ok + Ok(DryRunResult::Success) + } else if bytes[0] == 0 && bytes[1] == 1 { + // Ok(Err(dispatch_error)); transaction is valid but execution failed + let dispatch_error = + crate::error::DispatchError::decode_from(&bytes[2..], metadata.clone())?; + Ok(DryRunResult::DispatchError(dispatch_error)) + } else if bytes[0] == 1 { + // Err(transaction_error); some transaction validity error (we ignore the details at the moment) + Ok(DryRunResult::TransactionValidityError) + } else { + // unable to decode the bytes; they aren't what we expect. + Err(crate::Error::Unknown(bytes)) + } + } +} + +/// A number type that can be serialized both as a number or a string that encodes a number in a +/// string. +/// +/// We allow two representations of the block number as input. Either we deserialize to the type +/// that is specified in the block type or we attempt to parse given hex value. +/// +/// The primary motivation for having this type is to avoid overflows when using big integers in +/// JavaScript (which we consider as an important RPC API consumer). +#[derive(Copy, Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] +#[serde(untagged)] +pub enum NumberOrHex { + /// The number represented directly. + Number(u64), + /// Hex representation of the number. + Hex(U256), +} + +impl NumberOrHex { + /// Converts this number into an U256. + pub fn into_u256(self) -> U256 { + match self { + NumberOrHex::Number(n) => n.into(), + NumberOrHex::Hex(h) => h, + } + } +} + +impl From for U256 { + fn from(num_or_hex: NumberOrHex) -> U256 { + num_or_hex.into_u256() + } +} + +macro_rules! 
into_number_or_hex { + ($($t: ty)+) => { + $( + impl From<$t> for NumberOrHex { + fn from(x: $t) -> Self { + NumberOrHex::Number(x.into()) + } + } + )+ + } +} +into_number_or_hex!(u8 u16 u32 u64); + +impl From for NumberOrHex { + fn from(n: u128) -> Self { + NumberOrHex::Hex(n.into()) + } +} + +impl From for NumberOrHex { + fn from(n: U256) -> Self { + NumberOrHex::Hex(n) + } +} + +/// A quick helper to encode some bytes to hex. +fn to_hex(bytes: impl AsRef<[u8]>) -> String { + format!("0x{}", hex::encode(bytes.as_ref())) +} diff --git a/subxt/src/backend/mod.rs b/subxt/src/backend/mod.rs new file mode 100644 index 0000000000..b5fda66a16 --- /dev/null +++ b/subxt/src/backend/mod.rs @@ -0,0 +1,351 @@ +// Copyright 2019-2023 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This module exposes a backend trait for Subxt which allows us to get and set +//! the necessary information (probably from a JSON-RPC API, but that's up to the +//! implementation). + +pub mod legacy; +pub mod rpc; + +use crate::error::Error; +use crate::metadata::Metadata; +use crate::Config; +use async_trait::async_trait; +use codec::{Decode, Encode}; +use futures::{Stream, StreamExt}; +use std::pin::Pin; +use std::sync::Arc; + +/// Prevent the backend trait being implemented externally. +#[doc(hidden)] +pub(crate) mod sealed { + pub trait Sealed {} +} + +/// This trait exposes the interface that Subxt will use to communicate with +/// a backend. Its goal is to be as minimal as possible. +#[async_trait] +pub trait Backend: sealed::Sealed + Send + Sync + 'static { + /// Fetch values from storage. + async fn storage_fetch_values( + &self, + keys: Vec>, + at: T::Hash, + ) -> Result, Error>; + + /// Fetch keys underneath the given key from storage. 
+ async fn storage_fetch_descendant_keys( + &self, + key: Vec, + starting_at: Option>, + at: T::Hash, + ) -> Result>, Error>; + + /// Fetch values underneath the given key from storage. + async fn storage_fetch_descendant_values( + &self, + key: Vec, + at: T::Hash, + ) -> Result, Error>; + + /// Fetch the genesis hash + async fn genesis_hash(&self) -> Result; + + /// Get a block header + async fn block_header(&self, at: T::Hash) -> Result, Error>; + + /// Return the extrinsics found in the block. Each extrinsic is represented + /// by a vector of bytes which has _not_ been SCALE decoded (in other words, the + /// first bytes in the vector will decode to the compact encoded length of the extrinsic) + async fn block_body(&self, at: T::Hash) -> Result>>, Error>; + + /// Get the most recent finalized block hash. + /// Note: needed only in blocks client for finalized block stream; can prolly be removed. + async fn latest_finalized_block_ref(&self) -> Result, Error>; + + /// Get the most recent best block hash. + /// Note: needed only in blocks client for finalized block stream; can prolly be removed. + async fn latest_best_block_ref(&self) -> Result, Error>; + + /// Get information about the current runtime. + async fn current_runtime_version(&self) -> Result; + + /// A stream of all new runtime versions as they occur. + async fn stream_runtime_version(&self) -> Result, Error>; + + /// A stream of all new block headers as they arrive. + async fn stream_all_block_headers( + &self, + ) -> Result)>, Error>; + + /// A stream of best block headers. + async fn stream_best_block_headers( + &self, + ) -> Result)>, Error>; + + /// A stream of finalized block headers. + async fn stream_finalized_block_headers( + &self, + ) -> Result)>, Error>; + + /// Submit a transaction. This will return a stream of events about it. + async fn submit_transaction( + &self, + bytes: &[u8], + ) -> Result>, Error>; + + /// Make a call to some runtime API. 
+ async fn call( + &self, + method: &str, + call_parameters: Option<&[u8]>, + at: T::Hash, + ) -> Result, Error>; +} + +/// helpeful utility methods derived from those provided on [`Backend`] +#[async_trait] +pub trait BackendExt: Backend { + /// Fetch a single value from storage. + async fn storage_fetch_value( + &self, + key: Vec, + at: T::Hash, + ) -> Result>, Error> { + self.storage_fetch_values(vec![key], at) + .await? + .next() + .await + .transpose() + .map(|o| o.map(|s| s.value)) + } + + /// The same as a [`Backend::call()`], but it will also attempt to decode the + /// result into the given type, which is a fairly common operation. + async fn call_decoding( + &self, + method: &str, + call_parameters: Option<&[u8]>, + at: T::Hash, + ) -> Result { + let bytes = self.call(method, call_parameters, at).await?; + let res = D::decode(&mut &*bytes)?; + Ok(res) + } + + /// Return the metadata at some version. + async fn metadata_at_version(&self, version: u32, at: T::Hash) -> Result { + let param = version.encode(); + + let opaque: Option = self + .call_decoding("Metadata_metadata_at_version", Some(¶m), at) + .await?; + let Some(opaque) = opaque else { + return Err(Error::Other("Metadata version not found".into())); + }; + + let metadata: Metadata = Decode::decode(&mut &opaque.0[..])?; + Ok(metadata) + } + + /// Return V14 metadata from the legacy `Metadata_metadata` call. + async fn legacy_metadata(&self, at: T::Hash) -> Result { + let opaque: frame_metadata::OpaqueMetadata = + self.call_decoding("Metadata_metadata", None, at).await?; + let metadata: Metadata = Decode::decode(&mut &opaque.0[..])?; + Ok(metadata) + } +} + +#[async_trait] +impl + ?Sized, T: Config> BackendExt for B {} + +/// An opaque struct which, while alive, indicates that some references to a block +/// still exist. This gives the backend the opportunity to keep the corresponding block +/// details around for a while if it likes and is able to. 
No guarantees can be made about +/// how long the corresponding details might be available for, but if no references to a block +/// exist, then the backend is free to discard any details for it. +#[derive(Clone)] +pub struct BlockRef { + hash: H, + // We keep this around so that when it is dropped, it has the + // opportunity to tell the backend. + _pointer: Option>, +} + +impl From for BlockRef { + fn from(value: H) -> Self { + BlockRef::from_hash(value) + } +} + +impl PartialEq for BlockRef { + fn eq(&self, other: &Self) -> bool { + self.hash == other.hash + } +} +impl Eq for BlockRef {} + +impl PartialOrd for BlockRef { + fn partial_cmp(&self, other: &Self) -> Option { + self.hash.partial_cmp(&other.hash) + } +} + +impl Ord for BlockRef { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.hash.cmp(&other.hash) + } +} + +impl std::fmt::Debug for BlockRef { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple("BlockRef").field(&self.hash).finish() + } +} + +impl std::hash::Hash for BlockRef { + fn hash(&self, state: &mut Hasher) { + self.hash.hash(state); + } +} + +impl BlockRef { + /// A [`BlockRef`] that doesn't reference a given block, but does have an associated hash. + /// This is used in the legacy backend, which has no notion of pinning blocks. + pub fn from_hash(hash: H) -> Self { + Self { + hash, + _pointer: None, + } + } + /// Construct a [`BlockRef`] from an instance of the underlying trait. It's expected + /// that the [`Backend`] implementation will call this if it wants to track which blocks + /// are potentially in use. + pub fn new(hash: H, inner: P) -> Self { + Self { + hash, + _pointer: Some(Arc::new(inner)), + } + } + + /// Return the hash of the referenced block. 
+ pub fn hash(&self) -> H + where + H: Copy, + { + self.hash + } +} + +/// A trait that a [`Backend`] can implement to know when some block +/// can be unpinned: when this is dropped, there are no remaining references +/// to the block that it's associated with. +pub trait BlockRefT: Send + Sync + 'static {} + +/// A stream of some item. +pub struct StreamOf(Pin + Send + 'static>>); + +impl Stream for StreamOf { + type Item = T; + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.0.poll_next_unpin(cx) + } +} + +impl std::fmt::Debug for StreamOf { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple("StreamOf").field(&"").finish() + } +} + +impl StreamOf { + /// Construct a new stream. + pub fn new(inner: Pin + Send + 'static>>) -> Self { + StreamOf(inner) + } + + /// Returns the next item in the stream. This is just a wrapper around + /// [`StreamExt::next()`] so that you can avoid the extra import. + pub async fn next(&mut self) -> Option { + StreamExt::next(self).await + } +} + +/// A stream of [`Result`]. +pub type StreamOfResults = StreamOf>; + +/// Runtime version information needed to submit transactions. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct RuntimeVersion { + /// Version of the runtime specification. A full-node will not attempt to use its native + /// runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`, + /// `spec_version` and `authoring_version` are the same between Wasm and native. + pub spec_version: u32, + + /// All existing dispatches are fully compatible when this number doesn't change. If this + /// number changes, then `spec_version` must change, also. 
+ /// + /// This number must change when an existing dispatchable (module ID, dispatch ID) is changed, + /// either through an alteration in its user-level semantics, a parameter + /// added/removed/changed, a dispatchable being removed, a module being removed, or a + /// dispatchable/module changing its index. + /// + /// It need *not* change when a new module is added or when a dispatchable is added. + pub transaction_version: u32, +} + +/// The status of the transaction. +/// +/// If the status is [`TransactionStatus::InFinalizedBlock`], [`TransactionStatus::Error`], +/// [`TransactionStatus::Invalid`] or [`TransactionStatus::Dropped`], then no future +/// events will be emitted. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum TransactionStatus { + /// Transaction is part of the future queue. + Validated, + /// The transaction has been broadcast to other nodes. + Broadcasted { + /// Number of peers it's been broadcast to. + num_peers: u32, + }, + /// Transaction has been included in block with given hash. + InBestBlock { + /// Block hash the transaction is in. + hash: Hash, + }, + /// Transaction has been finalized by a finality-gadget, e.g GRANDPA + InFinalizedBlock { + /// Block hash the transaction is in. + hash: Hash, + }, + /// Something went wrong in the node. + Error { + /// Human readable message; what went wrong. + message: String, + }, + /// Transaction is invalid (bad nonce, signature etc). + Invalid { + /// Human readable message; why was it invalid. + message: String, + }, + /// The transaction was dropped. + Dropped { + /// Human readable message; why was it dropped. + message: String, + }, +} + +/// A response from calls like [`Backend::storage_fetch_values`] or +/// [`Backend::storage_fetch_descendant_values`]. +pub struct StorageResponse { + /// The key. + pub key: Vec, + /// The associated value. 
+ pub value: Vec, +} diff --git a/subxt/src/rpc/jsonrpsee_impl.rs b/subxt/src/backend/rpc/jsonrpsee_impl.rs similarity index 89% rename from subxt/src/rpc/jsonrpsee_impl.rs rename to subxt/src/backend/rpc/jsonrpsee_impl.rs index 538d24d742..7c2a367f34 100644 --- a/subxt/src/rpc/jsonrpsee_impl.rs +++ b/subxt/src/backend/rpc/jsonrpsee_impl.rs @@ -2,7 +2,7 @@ // This file is dual-licensed as Apache-2.0 or GPL-3.0. // see LICENSE for license details. -use super::{RpcClientT, RpcFuture, RpcSubscription}; +use super::{RawRpcFuture, RawRpcSubscription, RpcClientT}; use crate::error::RpcError; use futures::stream::{StreamExt, TryStreamExt}; use jsonrpsee::{ @@ -28,7 +28,7 @@ impl RpcClientT for Client { &'a self, method: &'a str, params: Option>, - ) -> RpcFuture<'a, Box> { + ) -> RawRpcFuture<'a, Box> { Box::pin(async move { let res = ClientT::request(self, method, Params(params)) .await @@ -42,7 +42,7 @@ impl RpcClientT for Client { sub: &'a str, params: Option>, unsub: &'a str, - ) -> RpcFuture<'a, RpcSubscription> { + ) -> RawRpcFuture<'a, RawRpcSubscription> { Box::pin(async move { let stream = SubscriptionClientT::subscribe::, _>( self, @@ -63,7 +63,7 @@ impl RpcClientT for Client { let stream = stream .map_err(|e| RpcError::ClientError(Box::new(e))) .boxed(); - Ok(RpcSubscription { stream, id }) + Ok(RawRpcSubscription { stream, id }) }) } } diff --git a/subxt/src/backend/rpc/mod.rs b/subxt/src/backend/rpc/mod.rs new file mode 100644 index 0000000000..0368a4a5c5 --- /dev/null +++ b/subxt/src/backend/rpc/mod.rs @@ -0,0 +1,67 @@ +// Copyright 2019-2023 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! RPC types and client for interacting with a substrate node. +//! +//! These are used behind the scenes by Subxt backend implementations, for +//! example [`crate::backend::legacy::LegacyBackend`]. If you need an RPC client, +//! 
then you can manually instantiate one, and then hand it to Subxt if you'd like +//! to re-use it for the Subxt connection. +//! +//! - [`RpcClientT`] is the underlying dynamic RPC implementation. This provides +//! the low level [`RpcClientT::request_raw`] and [`RpcClientT::subscribe_raw`] +//! methods. +//! - [`RpcClient`] is the higher level wrapper around this, offering +//! the [`RpcClient::request`] and [`RpcClient::subscribe`] methods. +//! +//! # Example +//! +//! Fetching the genesis hash. +//! +//! ```no_run +//! # #[tokio::main] +//! # async fn main() { +//! use subxt::{ +//! client::OnlineClient, +//! config::SubstrateConfig, +//! backend::rpc::RpcClient, +//! backend::legacy::LegacyRpcMethods, +//! }; +//! +//! // Instantiate a default RPC client pointing at some URL. +//! let rpc_client = RpcClient::from_url("ws://localhost:9944") +//! .await +//! .unwrap(); +//! +//! // Instantiate the legacy RPC interface, providing an appropriate +//! // config so that it uses the correct types for your chain. +//! let rpc_methods = LegacyRpcMethods::::new(rpc_client.clone()); +//! +//! // Use it to make RPC calls, here using the legacy genesis_hash method. +//! let genesis_hash = rpc_methods +//! .genesis_hash() +//! .await +//! .unwrap(); +//! +//! println!("{genesis_hash}"); +//! +//! // Instantiate the Subxt interface using the same client and config if you +//! // want to reuse the same connection: +//! let client = OnlineClient::::from_rpc_client(rpc_client); +//! # } +//! ``` + +// Allow an `rpc.rs` file in the `rpc` folder to align better +// with other file names for their types. 
+#![allow(clippy::module_inception)] + +#[cfg(feature = "jsonrpsee")] +mod jsonrpsee_impl; + +mod rpc_client; +mod rpc_client_t; + +pub use rpc_client_t::{RawRpcFuture, RawRpcSubscription, RawValue, RpcClientT}; + +pub use rpc_client::{rpc_params, RpcClient, RpcParams, RpcSubscription}; diff --git a/subxt/src/rpc/rpc_client.rs b/subxt/src/backend/rpc/rpc_client.rs similarity index 58% rename from subxt/src/rpc/rpc_client.rs rename to subxt/src/backend/rpc/rpc_client.rs index e5e408d4f8..0e4bb62940 100644 --- a/subxt/src/rpc/rpc_client.rs +++ b/subxt/src/backend/rpc/rpc_client.rs @@ -2,24 +2,36 @@ // This file is dual-licensed as Apache-2.0 or GPL-3.0. // see LICENSE for license details. -use super::{RpcClientT, RpcSubscription, RpcSubscriptionId}; +use super::{RawRpcSubscription, RpcClientT}; use crate::error::Error; use futures::{Stream, StreamExt}; use serde::{de::DeserializeOwned, Serialize}; use serde_json::value::RawValue; use std::{pin::Pin, sync::Arc, task::Poll}; -/// A concrete wrapper around an [`RpcClientT`] which exposes the udnerlying interface via some -/// higher level methods that make it a little easier to work with. -/// -/// Wrapping [`RpcClientT`] in this way is simply a way to expose this additional functionality -/// without getting into issues with non-object-safe methods or no `async` in traits. +/// A concrete wrapper around an [`RpcClientT`] which provides some higher level helper methods, +/// is cheaply cloneable, and can be handed to things like [`crate::client::OnlineClient`] to +/// instantiate it. #[derive(Clone)] -pub struct RpcClient(Arc); +pub struct RpcClient { + client: Arc, +} impl RpcClient { - pub(crate) fn new(client: Arc) -> Self { - RpcClient(client) + #[cfg(feature = "jsonrpsee")] + /// Create a default RPC client pointed at some URL, currently based on [`jsonrpsee`]. 
+ pub async fn from_url>(url: U) -> Result { + let client = jsonrpsee_helpers::client(url.as_ref()) + .await + .map_err(|e| crate::error::RpcError::ClientError(Box::new(e)))?; + Ok(Self::new(client)) + } + + /// Create a new [`RpcClient`] from an arbitrary [`RpcClientT`] implementation. + pub fn new(client: R) -> Self { + RpcClient { + client: Arc::new(client), + } } /// Make an RPC request, given a method name and some parameters. @@ -31,7 +43,7 @@ impl RpcClient { method: &str, params: RpcParams, ) -> Result { - let res = self.0.request_raw(method, params.build()).await?; + let res = self.client.request_raw(method, params.build()).await?; let val = serde_json::from_str(res.get())?; Ok(val) } @@ -46,9 +58,12 @@ impl RpcClient { sub: &str, params: RpcParams, unsub: &str, - ) -> Result, Error> { - let sub = self.0.subscribe_raw(sub, params.build(), unsub).await?; - Ok(Subscription::new(sub)) + ) -> Result, Error> { + let sub = self + .client + .subscribe_raw(sub, params.build(), unsub) + .await?; + Ok(RpcSubscription::new(sub)) } } @@ -61,7 +76,7 @@ impl std::fmt::Debug for RpcClient { impl std::ops::Deref for RpcClient { type Target = dyn RpcClientT; fn deref(&self) -> &Self::Target { - &*self.0 + &*self.client } } @@ -75,7 +90,7 @@ impl std::ops::Deref for RpcClient { /// # Example /// /// ```rust -/// use subxt::rpc::{ rpc_params, RpcParams }; +/// use subxt::backend::rpc::{ rpc_params, RpcParams }; /// /// // If you provide no params you get `None` back /// let params: RpcParams = rpc_params![]; @@ -90,7 +105,7 @@ macro_rules! rpc_params { ($($p:expr), *) => {{ // May be unused if empty; no params. #[allow(unused_mut)] - let mut params = $crate::rpc::RpcParams::new(); + let mut params = $crate::backend::rpc::RpcParams::new(); $( params.push($p).expect("values passed to rpc_params! 
must be serializable to JSON"); )* @@ -107,7 +122,7 @@ pub use rpc_params; /// # Example /// /// ```rust -/// use subxt::rpc::RpcParams; +/// use subxt::backend::rpc::RpcParams; /// /// let mut params = RpcParams::new(); /// params.push(1).unwrap(); @@ -151,23 +166,23 @@ impl RpcParams { /// A generic RPC Subscription. This implements [`Stream`], and so most of /// the functionality you'll need to interact with it comes from the /// [`StreamExt`] extension trait. -pub struct Subscription { - inner: RpcSubscription, +pub struct RpcSubscription { + inner: RawRpcSubscription, _marker: std::marker::PhantomData, } -impl std::fmt::Debug for Subscription { +impl std::fmt::Debug for RpcSubscription { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("Subscription") - .field("inner", &"RpcSubscription") + f.debug_struct("RpcSubscription") + .field("inner", &"RawRpcSubscription") .field("_marker", &self._marker) .finish() } } -impl Subscription { - /// Creates a new [`Subscription`]. - pub fn new(inner: RpcSubscription) -> Self { +impl RpcSubscription { + /// Creates a new [`RpcSubscription`]. + pub fn new(inner: RawRpcSubscription) -> Self { Self { inner, _marker: std::marker::PhantomData, @@ -175,21 +190,22 @@ impl Subscription { } /// Obtain the ID associated with this subscription. - pub fn subscription_id(&self) -> Option<&RpcSubscriptionId> { - self.inner.id.as_ref() + pub fn subscription_id(&self) -> Option<&str> { + self.inner.id.as_deref() } } -impl Subscription { - /// Wait for the next item from the subscription. +impl RpcSubscription { + /// Returns the next item in the stream. This is just a wrapper around + /// [`StreamExt::next()`] so that you can avoid the extra import. 
pub async fn next(&mut self) -> Option> { StreamExt::next(self).await } } -impl std::marker::Unpin for Subscription {} +impl std::marker::Unpin for RpcSubscription {} -impl Stream for Subscription { +impl Stream for RpcSubscription { type Item = Result; fn poll_next( @@ -208,3 +224,53 @@ impl Stream for Subscription { Poll::Ready(res) } } + +// helpers for a jsonrpsee specific RPC client. +#[cfg(all(feature = "jsonrpsee", feature = "native"))] +mod jsonrpsee_helpers { + pub use jsonrpsee::{ + client_transport::ws::{Receiver, Sender, Url, WsTransportClientBuilder}, + core::{ + client::{Client, ClientBuilder}, + Error, + }, + }; + + /// Build WS RPC client from URL + pub async fn client(url: &str) -> Result { + let (sender, receiver) = ws_transport(url).await?; + Ok(Client::builder() + .max_buffer_capacity_per_subscription(4096) + .build_with_tokio(sender, receiver)) + } + + async fn ws_transport(url: &str) -> Result<(Sender, Receiver), Error> { + let url = Url::parse(url).map_err(|e| Error::Transport(e.into()))?; + WsTransportClientBuilder::default() + .build(url) + .await + .map_err(|e| Error::Transport(e.into())) + } +} + +// helpers for a jsonrpsee specific RPC client. 
+#[cfg(all(feature = "jsonrpsee", feature = "web", target_arch = "wasm32"))] +mod jsonrpsee_helpers { + pub use jsonrpsee::{ + client_transport::web, + core::{ + client::{Client, ClientBuilder}, + Error, + }, + }; + + /// Build web RPC client from URL + pub async fn client(url: &str) -> Result { + let (sender, receiver) = web::connect(url) + .await + .map_err(|e| Error::Transport(e.into()))?; + Ok(ClientBuilder::default() + .max_buffer_capacity_per_subscription(4096) + .build_with_wasm(sender, receiver)) + } +} diff --git a/subxt/src/rpc/rpc_client_t.rs b/subxt/src/backend/rpc/rpc_client_t.rs similarity index 76% rename from subxt/src/rpc/rpc_client_t.rs rename to subxt/src/backend/rpc/rpc_client_t.rs index 64576d0968..49ca84cab4 100644 --- a/subxt/src/rpc/rpc_client_t.rs +++ b/subxt/src/backend/rpc/rpc_client_t.rs @@ -9,8 +9,9 @@ use std::{future::Future, pin::Pin}; // Re-exporting for simplicity since it's used a bunch in the trait definition. pub use serde_json::value::RawValue; -/// Any RPC client which implements this can be used in our [`super::Rpc`] type -/// to talk to a node. +/// A trait describing low level JSON-RPC interactions. Implementations of this can be +/// used to instantiate a [`super::RpcClient`], which can be passed to [`crate::OnlineClient`] +/// or used for lower level RPC calls via eg [`crate::backend::legacy::LegacyRpcMethods`]. /// /// This is a low level interface whose methods expect an already-serialized set of params, /// and return an owned but still-serialized [`RawValue`], deferring deserialization to @@ -35,7 +36,7 @@ pub trait RpcClientT: Send + Sync + 'static { &'a self, method: &'a str, params: Option>, - ) -> RpcFuture<'a, Box>; + ) -> RawRpcFuture<'a, Box>; /// Subscribe to some method. Implementations should expect that the params will /// either be `None`, or be an already-serialized JSON array of parameters. 
@@ -49,23 +50,16 @@ pub trait RpcClientT: Send + Sync + 'static { sub: &'a str, params: Option>, unsub: &'a str, - ) -> RpcFuture<'a, RpcSubscription>; + ) -> RawRpcFuture<'a, RawRpcSubscription>; } /// A boxed future that is returned from the [`RpcClientT`] methods. -pub type RpcFuture<'a, T> = Pin> + Send + 'a>>; +pub type RawRpcFuture<'a, T> = Pin> + Send + 'a>>; /// The RPC subscription returned from [`RpcClientT`]'s `subscription` method. -pub struct RpcSubscription { +pub struct RawRpcSubscription { /// The subscription stream. - pub stream: RpcSubscriptionStream, + pub stream: Pin, RpcError>> + Send + 'static>>, /// The ID associated with the subscription. - pub id: Option, + pub id: Option, } - -/// The inner subscription stream returned from our [`RpcClientT`]'s `subscription` method. -pub type RpcSubscriptionStream = - Pin, RpcError>> + Send + 'static>>; - -/// The ID associated with the [`RpcClientT`]'s `subscription`. -pub type RpcSubscriptionId = String; diff --git a/subxt/src/blocks/block_types.rs b/subxt/src/blocks/block_types.rs index b4cea291f7..25e97d6457 100644 --- a/subxt/src/blocks/block_types.rs +++ b/subxt/src/blocks/block_types.rs @@ -3,12 +3,12 @@ // see LICENSE for license details. use crate::{ + backend::BlockRef, blocks::{extrinsic_types::ExtrinsicPartTypeIds, Extrinsics}, client::{OfflineClientT, OnlineClientT}, config::{Config, Header}, error::{BlockError, Error}, events, - rpc::types::ChainBlockResponse, runtime_api::RuntimeApi, storage::Storage, }; @@ -19,6 +19,7 @@ use std::sync::Arc; /// A representation of a block. 
pub struct Block { header: T::Header, + block_ref: BlockRef, client: C, // Since we obtain the same events for every extrinsic, let's // cache them so that we only ever do that once: @@ -34,17 +35,24 @@ where T: Config, C: OfflineClientT, { - pub(crate) fn new(header: T::Header, client: C) -> Self { + pub(crate) fn new(header: T::Header, block_ref: BlockRef, client: C) -> Self { Block { header, + block_ref, client, cached_events: Default::default(), } } + /// Return a reference to the given block. While this reference is kept alive, + /// the backend will (if possible) endeavour to keep hold of the block. + pub fn reference(&self) -> BlockRef { + self.block_ref.clone() + } + /// Return the block hash. pub fn hash(&self) -> T::Hash { - self.header.hash() + self.block_ref.hash() } /// Return the block number. @@ -68,72 +76,31 @@ where get_events(&self.client, self.header.hash(), &self.cached_events).await } - /// Fetch and return the block body. - pub async fn body(&self) -> Result, Error> { + /// Fetch and return the extrinsics in the block body. + pub async fn extrinsics(&self) -> Result, Error> { let ids = ExtrinsicPartTypeIds::new(&self.client.metadata())?; let block_hash = self.header.hash(); - let Some(block_details) = self.client.rpc().block(Some(block_hash)).await? else { + let Some(extrinsics) = self.client.backend().block_body(block_hash).await? else { return Err(BlockError::not_found(block_hash).into()); }; - Ok(BlockBody::new( + Ok(Extrinsics::new( self.client.clone(), - block_details, + extrinsics, self.cached_events.clone(), ids, + block_hash, )) } /// Work with storage. pub fn storage(&self) -> Storage { - let block_hash = self.hash(); - Storage::new(self.client.clone(), block_hash) + Storage::new(self.client.clone(), self.block_ref.clone()) } /// Execute a runtime API call at this block. pub async fn runtime_api(&self) -> Result, Error> { - Ok(RuntimeApi::new(self.client.clone(), self.hash())) - } -} - -/// The body of a block. 
-pub struct BlockBody { - details: ChainBlockResponse, - client: C, - cached_events: CachedEvents, - ids: ExtrinsicPartTypeIds, -} - -impl BlockBody -where - T: Config, - C: OfflineClientT, -{ - pub(crate) fn new( - client: C, - details: ChainBlockResponse, - cached_events: CachedEvents, - ids: ExtrinsicPartTypeIds, - ) -> Self { - Self { - details, - client, - cached_events, - ids, - } - } - - /// Returns an iterator over the extrinsics in the block body. - // Dev note: The returned iterator is 'static + Send so that we can box it up and make - // use of it with our `FilterExtrinsic` stuff. - pub fn extrinsics(&self) -> Extrinsics { - Extrinsics::new( - self.client.clone(), - self.details.block.extrinsics.clone(), - self.cached_events.clone(), - self.ids, - self.details.block.header.hash(), - ) + Ok(RuntimeApi::new(self.client.clone(), self.block_ref.clone())) } } diff --git a/subxt/src/blocks/blocks_client.rs b/subxt/src/blocks/blocks_client.rs index 0f535c5caf..4a80f775fa 100644 --- a/subxt/src/blocks/blocks_client.rs +++ b/subxt/src/blocks/blocks_client.rs @@ -4,16 +4,17 @@ use super::Block; use crate::{ + backend::{BlockRef, StreamOfResults}, client::OnlineClientT, - config::{Config, Header}, + config::Config, error::{BlockError, Error}, utils::PhantomDataSendSync, }; use derivative::Derivative; -use futures::{future::Either, stream, Stream, StreamExt}; -use std::{future::Future, pin::Pin}; +use futures::StreamExt; +use std::future::Future; -type BlockStream = Pin> + Send>>; +type BlockStream = StreamOfResults; type BlockStreamRes = Result, Error>; /// A client for working with blocks. @@ -48,9 +49,9 @@ where /// but may run into errors attempting to work with them. pub fn at( &self, - block_hash: T::Hash, + block_ref: impl Into>, ) -> impl Future, Error>> + Send + 'static { - self.at_or_latest(Some(block_hash)) + self.at_or_latest(Some(block_ref.into())) } /// Obtain block details of the latest block hash. @@ -64,27 +65,22 @@ where /// provided. 
fn at_or_latest( &self, - block_hash: Option, + block_ref: Option>, ) -> impl Future, Error>> + Send + 'static { let client = self.client.clone(); async move { - // If block hash is not provided, get the hash - // for the latest block and use that. - let block_hash = match block_hash { - Some(hash) => hash, - None => client - .rpc() - .block_hash(None) - .await? - .expect("didn't pass a block number; qed"), + // If a block ref isn't provided, we'll get the latest best block to use. + let block_ref = match block_ref { + Some(r) => r, + None => client.backend().latest_best_block_ref().await?, }; - let block_header = match client.rpc().header(Some(block_hash)).await? { + let block_header = match client.backend().block_header(block_ref.hash()).await? { Some(header) => header, - None => return Err(BlockError::not_found(block_hash).into()), + None => return Err(BlockError::not_found(block_ref.hash()).into()), }; - Ok(Block::new(block_header, client)) + Ok(Block::new(block_header, block_ref, client)) } } @@ -100,8 +96,8 @@ where { let client = self.client.clone(); header_sub_fut_to_block_sub(self.clone(), async move { - let sub = client.rpc().subscribe_all_block_headers().await?; - BlockStreamRes::Ok(Box::pin(sub)) + let sub = client.backend().stream_all_block_headers().await?; + BlockStreamRes::Ok(sub) }) } @@ -117,8 +113,8 @@ where { let client = self.client.clone(); header_sub_fut_to_block_sub(self.clone(), async move { - let sub = client.rpc().subscribe_best_block_headers().await?; - BlockStreamRes::Ok(Box::pin(sub)) + let sub = client.backend().stream_best_block_headers().await?; + BlockStreamRes::Ok(sub) }) } @@ -131,22 +127,8 @@ where { let client = self.client.clone(); header_sub_fut_to_block_sub(self.clone(), async move { - // Fetch the last finalised block details immediately, so that we'll get - // all blocks after this one. 
- let last_finalized_block_hash = client.rpc().finalized_head().await?; - let last_finalized_block_num = client - .rpc() - .header(Some(last_finalized_block_hash)) - .await? - .map(|h| h.number().into()); - - let sub = client.rpc().subscribe_finalized_block_headers().await?; - - // Adjust the subscription stream to fill in any missing blocks. - BlockStreamRes::Ok( - subscribe_to_block_headers_filling_in_gaps(client, last_finalized_block_num, sub) - .boxed(), - ) + let sub = client.backend().stream_finalized_block_headers().await?; + BlockStreamRes::Ok(sub) }) } } @@ -159,69 +141,19 @@ async fn header_sub_fut_to_block_sub( ) -> Result>, Error> where T: Config, - S: Future, Error>> + Send + 'static, + S: Future)>, Error>> + Send + 'static, Client: OnlineClientT + Send + Sync + 'static, { - let sub = sub.await?.then(move |header| { + let sub = sub.await?.then(move |header_and_ref| { let client = blocks_client.client.clone(); async move { - let header = match header { - Ok(header) => header, + let (header, block_ref) = match header_and_ref { + Ok(header_and_ref) => header_and_ref, Err(e) => return Err(e), }; - Ok(Block::new(header, client)) + Ok(Block::new(header, block_ref, client)) } }); - BlockStreamRes::Ok(Box::pin(sub)) -} - -/// Note: This is exposed for testing but is not considered stable and may change -/// without notice in a patch release. -#[doc(hidden)] -pub fn subscribe_to_block_headers_filling_in_gaps( - client: Client, - mut last_block_num: Option, - sub: S, -) -> impl Stream> + Send -where - T: Config, - Client: OnlineClientT, - S: Stream> + Send, - E: Into + Send + 'static, -{ - sub.flat_map(move |s| { - let client = client.clone(); - - // Get the header, or return a stream containing just the error. - let header = match s { - Ok(header) => header, - Err(e) => return Either::Left(stream::once(async { Err(e.into()) })), - }; - - // We want all previous details up to, but not including this current block num. 
- let end_block_num = header.number().into(); - - // This is one after the last block we returned details for last time. - let start_block_num = last_block_num.map(|n| n + 1).unwrap_or(end_block_num); - - // Iterate over all of the previous blocks we need headers for, ignoring the current block - // (which we already have the header info for): - let previous_headers = stream::iter(start_block_num..end_block_num) - .then(move |n| { - let rpc = client.rpc().clone(); - async move { - let hash = rpc.block_hash(Some(n.into())).await?; - let header = rpc.header(hash).await?; - Ok::<_, Error>(header) - } - }) - .filter_map(|h| async { h.transpose() }); - - // On the next iteration, we'll get details starting just after this end block. - last_block_num = Some(end_block_num); - - // Return a combination of any previous headers plus the new header. - Either::Right(previous_headers.chain(stream::once(async { Ok(header) }))) - }) + BlockStreamRes::Ok(StreamOfResults::new(Box::pin(sub))) } diff --git a/subxt/src/blocks/extrinsic_types.rs b/subxt/src/blocks/extrinsic_types.rs index b7cd41588a..ba83173bbf 100644 --- a/subxt/src/blocks/extrinsic_types.rs +++ b/subxt/src/blocks/extrinsic_types.rs @@ -9,7 +9,6 @@ use crate::{ error::{BlockError, Error, MetadataError}, events, metadata::types::PalletMetadata, - rpc::types::ChainBlockExtrinsic, Metadata, }; @@ -40,7 +39,7 @@ pub trait StaticExtrinsic: DecodeAsFields { /// The body of a block. 
pub struct Extrinsics { client: C, - extrinsics: Vec, + extrinsics: Vec>, cached_events: CachedEvents, ids: ExtrinsicPartTypeIds, hash: T::Hash, @@ -53,7 +52,7 @@ where { pub(crate) fn new( client: C, - extrinsics: Vec, + extrinsics: Vec>, cached_events: CachedEvents, ids: ExtrinsicPartTypeIds, hash: T::Hash, @@ -103,7 +102,7 @@ where } else { match ExtrinsicDetails::decode_from( index as u32, - &extrinsics[index].0, + &extrinsics[index], client.clone(), hash, cached_events.clone(), @@ -562,7 +561,7 @@ impl ExtrinsicEvents { #[cfg(test)] mod tests { use super::*; - use crate::{rpc::types::RuntimeVersion, OfflineClient, PolkadotConfig}; + use crate::{backend::RuntimeVersion, OfflineClient, PolkadotConfig}; use assert_matches::assert_matches; use codec::{Decode, Encode}; use frame_metadata::v15::{CustomMetadata, OuterEnums}; @@ -697,7 +696,6 @@ mod tests { let rt_version = RuntimeVersion { spec_version: 1, transaction_version: 4, - other: Default::default(), }; let block_hash = H256::random(); OfflineClient::new(block_hash, rt_version, metadata) diff --git a/subxt/src/blocks/mod.rs b/subxt/src/blocks/mod.rs index 085eebe9a5..da99fe22d5 100644 --- a/subxt/src/blocks/mod.rs +++ b/subxt/src/blocks/mod.rs @@ -8,6 +8,9 @@ mod block_types; mod blocks_client; mod extrinsic_types; -pub use block_types::{Block, BlockBody}; -pub use blocks_client::{subscribe_to_block_headers_filling_in_gaps, BlocksClient}; +/// A reference to a block. +pub use crate::backend::BlockRef; + +pub use block_types::Block; +pub use blocks_client::BlocksClient; pub use extrinsic_types::{ExtrinsicDetails, ExtrinsicEvents, Extrinsics, StaticExtrinsic}; diff --git a/subxt/src/book/setup/client.rs b/subxt/src/book/setup/client.rs index fb1567d687..30f86fe2c9 100644 --- a/subxt/src/book/setup/client.rs +++ b/subxt/src/book/setup/client.rs @@ -18,23 +18,27 @@ //! The provided clients are all generic over the [`crate::config::Config`] that they accept, which //! 
determines how they will interact with the chain. //! -//! In the case of the [`crate::OnlineClient`], we have a few options to instantiate it: +//! In the case of the [`crate::OnlineClient`], we have various ways to instantiate it: //! -//! - [`crate::OnlineClient::new()`] to connect to a node running locally. -//! - [`crate::OnlineClient::from_url()`] to connect to a node at a specific URL. -//! - [`crate::OnlineClient::from_rpc_client()`] to instantiate the client with a custom RPC -//! implementation. +//! - [`crate::OnlineClient::new()`] to connect to a node running locally. This uses the default Subxt +//! backend, and the default RPC client. +//! - [`crate::OnlineClient::from_url()`] to connect to a node at a specific URL. This uses the default Subxt +//! backend, and the default RPC client. +//! - [`crate::OnlineClient::from_rpc_client()`] to instantiate the client with a [`crate::backend::rpc::RpcClient`]. +//! - [`crate::OnlineClient::from_backend()`] to instantiate Subxt using a custom backend. Currently there +//! is just one backend, [`crate::backend::legacy::LegacyBackend`]. This backend can be instantiated from +//! a [`crate::backend::rpc::RpcClient`]. //! -//! The latter accepts anything that implements the low level [`crate::rpc::RpcClientT`] trait; this -//! allows you to decide how Subxt will attempt to talk to a node if you'd prefer something other -//! than the provided interfaces. Under the hood, this is also how the light client is implemented. +//! [`crate::backend::rpc::RpcClient`] can itself be instantiated from anything that implements the low level +//! [`crate::backend::rpc::RpcClientT`] trait; this allows you to decide how Subxt will attempt to talk to a node +//! if you'd prefer something other than the default client. We use this approach under the hood to implement the light client. //! //! ## Examples //! //! Most of the other examples will instantiate a client. Here are a couple of examples for less common //! cases. //! -//! 
### Writing a custom [`crate::rpc::RpcClientT`] implementation: +//! ### Writing a custom [`crate::backend::rpc::RpcClientT`] implementation: //! //! ```rust,ignore #![doc = include_str!("../../../examples/setup_client_custom_rpc.rs")] diff --git a/subxt/src/book/usage/blocks.rs b/subxt/src/book/usage/blocks.rs index 61f1ae9340..c98e0298ca 100644 --- a/subxt/src/book/usage/blocks.rs +++ b/subxt/src/book/usage/blocks.rs @@ -16,7 +16,7 @@ //! //! In either case, you'll end up with [`crate::blocks::Block`]'s, from which you can access various //! information about the block, such a the [header](crate::blocks::Block::header()), [block -//! number](crate::blocks::Block::number()) and [body](crate::blocks::Block::body()). +//! number](crate::blocks::Block::number()) and [body (the extrinsics)](crate::blocks::Block::extrinsics()). //! [`crate::blocks::Block`]'s also provide shortcuts to other Subxt APIs that will operate at the //! given block: //! @@ -29,9 +29,9 @@ //! //! ## Example //! -//! Given a block, you can [download the block body](crate::blocks::Block::body()) and iterate over -//! the extrinsics stored within it using [`crate::blocks::BlockBody::extrinsics()`]. From there, you -//! can decode the extrinsics and access various details, including the associated events: +//! Given a block, you can [download the block body](crate::blocks::Block::extrinsics()) and iterate over +//! the extrinsics stored within it. From there, you can decode the extrinsics and access various details, +//! including the associated events: //! //! ```rust,ignore #![doc = include_str!("../../../examples/blocks_subscribing.rs")] diff --git a/subxt/src/book/usage/storage.rs b/subxt/src/book/usage/storage.rs index e387577b7c..6d636c6bc3 100644 --- a/subxt/src/book/usage/storage.rs +++ b/subxt/src/book/usage/storage.rs @@ -123,7 +123,7 @@ //! ### Advanced //! //! For more advanced use cases, have a look at [`crate::storage::Storage::fetch_raw`] and -//! 
[`crate::storage::Storage::fetch_keys`]. Both of these take raw bytes as arguments, which can be +//! [`crate::storage::Storage::fetch_raw_keys`]. Both of these take raw bytes as arguments, which can be //! obtained from a [`crate::storage::StorageAddress`] by using //! [`crate::storage::StorageClient::address_bytes()`] or //! [`crate::storage::StorageClient::address_root_bytes()`]. diff --git a/subxt/src/client/lightclient/builder.rs b/subxt/src/client/lightclient/builder.rs index d80ea23d22..8c02f2f761 100644 --- a/subxt/src/client/lightclient/builder.rs +++ b/subxt/src/client/lightclient/builder.rs @@ -3,12 +3,10 @@ // see LICENSE for license details. use super::{rpc::LightClientRpc, LightClient, LightClientError}; +use crate::backend::rpc::RpcClient; use crate::{config::Config, error::Error, OnlineClient}; - -use subxt_lightclient::{AddChainConfig, AddChainConfigJsonRpc, ChainId}; - use std::num::NonZeroU32; -use std::sync::Arc; +use subxt_lightclient::{AddChainConfig, AddChainConfigJsonRpc, ChainId}; /// Builder for [`LightClient`]. #[derive(Clone, Debug)] @@ -149,8 +147,8 @@ impl LightClientBuilder { user_data: (), }; - let rpc = LightClientRpc::new(config)?; - let online_client = OnlineClient::::from_rpc_client(Arc::new(rpc)).await?; + let rpc_client = RpcClient::new(LightClientRpc::new(config)?); + let online_client = OnlineClient::::from_rpc_client(rpc_client).await?; Ok(LightClient(online_client)) } } diff --git a/subxt/src/client/lightclient/mod.rs b/subxt/src/client/lightclient/mod.rs index ef95173f35..20ff92c54e 100644 --- a/subxt/src/client/lightclient/mod.rs +++ b/subxt/src/client/lightclient/mod.rs @@ -77,7 +77,7 @@ impl LightClient { } /// Return the runtime version. 
- fn runtime_version(&self) -> crate::rpc::types::RuntimeVersion { + fn runtime_version(&self) -> crate::backend::RuntimeVersion { self.0.runtime_version() } @@ -118,8 +118,8 @@ impl LightClient { } impl OnlineClientT for LightClient { - fn rpc(&self) -> &crate::rpc::Rpc { - self.0.rpc() + fn backend(&self) -> &dyn crate::backend::Backend { + self.0.backend() } } @@ -132,7 +132,7 @@ impl OfflineClientT for LightClient { self.genesis_hash() } - fn runtime_version(&self) -> crate::rpc::types::RuntimeVersion { + fn runtime_version(&self) -> crate::backend::RuntimeVersion { self.runtime_version() } } diff --git a/subxt/src/client/lightclient/rpc.rs b/subxt/src/client/lightclient/rpc.rs index 4d675db6b9..7c8948c9bd 100644 --- a/subxt/src/client/lightclient/rpc.rs +++ b/subxt/src/client/lightclient/rpc.rs @@ -4,12 +4,11 @@ use super::LightClientError; use crate::{ + backend::rpc::{RawRpcFuture, RawRpcSubscription, RpcClientT}, error::{Error, RpcError}, - rpc::{RpcClientT, RpcFuture, RpcSubscription}, }; -use futures::{Stream, StreamExt}; +use futures::StreamExt; use serde_json::value::RawValue; -use std::pin::Pin; use subxt_lightclient::{AddChainConfig, ChainId, LightClientRpcError}; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -45,7 +44,7 @@ impl RpcClientT for LightClientRpc { &'a self, method: &'a str, params: Option>, - ) -> RpcFuture<'a, Box> { + ) -> RawRpcFuture<'a, Box> { let client = self.clone(); Box::pin(async move { @@ -78,7 +77,7 @@ impl RpcClientT for LightClientRpc { sub: &'a str, params: Option>, _unsub: &'a str, - ) -> RpcFuture<'a, RpcSubscription> { + ) -> RawRpcFuture<'a, RawRpcSubscription> { let client = self.clone(); Box::pin(async move { @@ -121,12 +120,8 @@ impl RpcClientT for LightClientRpc { let stream = UnboundedReceiverStream::new(notif); - let rpc_substription_stream: Pin< - Box, RpcError>> + Send + 'static>, - > = Box::pin(stream.map(Ok)); - - let rpc_subscription: RpcSubscription = RpcSubscription { - stream: 
rpc_substription_stream, + let rpc_subscription = RawRpcSubscription { + stream: Box::pin(stream.map(Ok)), id: Some(sub_id), }; diff --git a/subxt/src/client/mod.rs b/subxt/src/client/mod.rs index b2cc087d34..a800ad603c 100644 --- a/subxt/src/client/mod.rs +++ b/subxt/src/client/mod.rs @@ -19,8 +19,5 @@ pub use online_client::{ ClientRuntimeUpdater, OnlineClient, OnlineClientT, RuntimeUpdaterStream, Update, UpgradeError, }; -#[cfg(feature = "jsonrpsee")] -pub use online_client::default_rpc_client; - #[cfg(feature = "unstable-light-client")] pub use lightclient::{LightClient, LightClientBuilder, LightClientError}; diff --git a/subxt/src/client/offline_client.rs b/subxt/src/client/offline_client.rs index be81cd5b7e..4477e21b7a 100644 --- a/subxt/src/client/offline_client.rs +++ b/subxt/src/client/offline_client.rs @@ -4,9 +4,9 @@ use crate::custom_values::CustomValuesClient; use crate::{ - blocks::BlocksClient, constants::ConstantsClient, events::EventsClient, - rpc::types::RuntimeVersion, runtime_api::RuntimeApiClient, storage::StorageClient, - tx::TxClient, Config, Metadata, + backend::RuntimeVersion, blocks::BlocksClient, constants::ConstantsClient, + events::EventsClient, runtime_api::RuntimeApiClient, storage::StorageClient, tx::TxClient, + Config, Metadata, }; use derivative::Derivative; diff --git a/subxt/src/client/online_client.rs b/subxt/src/client/online_client.rs index 44ba98f446..6e042dde52 100644 --- a/subxt/src/client/online_client.rs +++ b/subxt/src/client/online_client.rs @@ -5,30 +5,27 @@ use super::{OfflineClient, OfflineClientT}; use crate::custom_values::CustomValuesClient; use crate::{ + backend::{ + legacy::LegacyBackend, rpc::RpcClient, Backend, BackendExt, RuntimeVersion, StreamOfResults, + }, blocks::BlocksClient, constants::ConstantsClient, error::Error, events::EventsClient, - rpc::{ - types::{RuntimeVersion, Subscription}, - Rpc, RpcClientT, - }, runtime_api::RuntimeApiClient, storage::StorageClient, tx::TxClient, Config, Metadata, }; use 
derivative::Derivative; - use futures::future; - use std::sync::{Arc, RwLock}; /// A trait representing a client that can perform /// online actions. pub trait OnlineClientT: OfflineClientT { - /// Return an RPC client that can be used to communicate with a node. - fn rpc(&self) -> &Rpc; + /// Return a backend that can be used to communicate with a node. + fn backend(&self) -> &dyn Backend; } /// A client that can be used to perform API calls (that is, either those @@ -37,7 +34,7 @@ pub trait OnlineClientT: OfflineClientT { #[derivative(Clone(bound = ""))] pub struct OnlineClient { inner: Arc>>, - rpc: Rpc, + backend: Arc>, } #[derive(Derivative)] @@ -57,15 +54,6 @@ impl std::fmt::Debug for OnlineClient { } } -/// The default RPC client that's used (based on [`jsonrpsee`]). -#[cfg(feature = "jsonrpsee")] -pub async fn default_rpc_client>(url: U) -> Result { - let client = jsonrpsee_helpers::client(url.as_ref()) - .await - .map_err(|e| crate::error::RpcError::ClientError(Box::new(e)))?; - Ok(client) -} - // The default constructors assume Jsonrpsee. #[cfg(feature = "jsonrpsee")] impl OnlineClient { @@ -78,26 +66,56 @@ impl OnlineClient { /// Construct a new [`OnlineClient`], providing a URL to connect to. pub async fn from_url(url: impl AsRef) -> Result, Error> { - let client = default_rpc_client(url).await?; - OnlineClient::from_rpc_client(Arc::new(client)).await + let client = RpcClient::from_url(url).await?; + let backend = LegacyBackend::new(client); + OnlineClient::from_backend(Arc::new(backend)).await } } impl OnlineClient { - /// Construct a new [`OnlineClient`] by providing an underlying [`RpcClientT`] - /// implementation to drive the connection. - pub async fn from_rpc_client( - rpc_client: Arc, + /// Construct a new [`OnlineClient`] by providing an [`RpcClient`] to drive the connection. + /// This will use the current default [`Backend`], which may change in future releases. 
+ pub async fn from_rpc_client(rpc_client: RpcClient) -> Result, Error> { + let backend = Arc::new(LegacyBackend::new(rpc_client)); + OnlineClient::from_backend(backend).await + } + + /// Construct a new [`OnlineClient`] by providing an RPC client along with the other + /// necessary details. This will use the current default [`Backend`], which may change + /// in future releases. + /// + /// # Warning + /// + /// This is considered the most primitive and also error prone way to + /// instantiate a client; the genesis hash, metadata and runtime version provided will + /// entirely determine which node and blocks this client will be able to interact with, + /// and whether it will be able to successfully do things like submit transactions. + /// + /// If you're unsure what you're doing, prefer one of the alternate methods to instantiate + /// a client. + pub fn from_rpc_client_with( + genesis_hash: T::Hash, + runtime_version: RuntimeVersion, + metadata: impl Into, + rpc_client: RpcClient, ) -> Result, Error> { - let rpc = Rpc::::new(rpc_client.clone()); + let backend = Arc::new(LegacyBackend::new(rpc_client)); + OnlineClient::from_backend_with(genesis_hash, runtime_version, metadata, backend) + } + + /// Construct a new [`OnlineClient`] by providing an underlying [`Backend`] + /// implementation to power it. Other details will be obtained from the chain. 
+ pub async fn from_backend>(backend: Arc) -> Result, Error> { + let latest_block = backend.latest_best_block_ref().await?; + let (genesis_hash, runtime_version, metadata) = future::join3( - rpc.genesis_hash(), - rpc.runtime_version(None), - OnlineClient::fetch_metadata(&rpc), + backend.genesis_hash(), + backend.current_runtime_version(), + OnlineClient::fetch_metadata(&*backend, latest_block.hash()), ) .await; - OnlineClient::from_rpc_client_with(genesis_hash?, runtime_version?, metadata?, rpc_client) + OnlineClient::from_backend_with(genesis_hash?, runtime_version?, metadata?, backend) } /// Construct a new [`OnlineClient`] by providing all of the underlying details needed @@ -112,11 +130,11 @@ impl OnlineClient { /// /// If you're unsure what you're doing, prefer one of the alternate methods to instantiate /// a client. - pub fn from_rpc_client_with( + pub fn from_backend_with>( genesis_hash: T::Hash, runtime_version: RuntimeVersion, metadata: impl Into, - rpc_client: Arc, + backend: Arc, ) -> Result, Error> { Ok(OnlineClient { inner: Arc::new(RwLock::new(Inner { @@ -124,12 +142,15 @@ impl OnlineClient { runtime_version, metadata: metadata.into(), })), - rpc: Rpc::new(rpc_client), + backend, }) } /// Fetch the metadata from substrate using the runtime API. - async fn fetch_metadata(rpc: &Rpc) -> Result { + async fn fetch_metadata( + backend: &dyn Backend, + block_hash: T::Hash, + ) -> Result { #[cfg(feature = "unstable-metadata")] { /// The unstable metadata version number. @@ -137,28 +158,37 @@ impl OnlineClient { // Try to fetch the latest unstable metadata, if that fails fall back to // fetching the latest stable metadata. 
- match rpc.metadata_at_version(UNSTABLE_METADATA_VERSION).await { + match backend + .metadata_at_version(UNSTABLE_METADATA_VERSION, block_hash) + .await + { Ok(bytes) => Ok(bytes), - Err(_) => OnlineClient::fetch_latest_stable_metadata(rpc).await, + Err(_) => OnlineClient::fetch_latest_stable_metadata(backend, block_hash).await, } } #[cfg(not(feature = "unstable-metadata"))] - OnlineClient::fetch_latest_stable_metadata(rpc).await + OnlineClient::fetch_latest_stable_metadata(backend, block_hash).await } /// Fetch the latest stable metadata from the node. - async fn fetch_latest_stable_metadata(rpc: &Rpc) -> Result { + async fn fetch_latest_stable_metadata( + backend: &dyn Backend, + block_hash: T::Hash, + ) -> Result { // This is the latest stable metadata that subxt can utilize. const V15_METADATA_VERSION: u32 = 15; // Try to fetch the metadata version. - if let Ok(bytes) = rpc.metadata_at_version(V15_METADATA_VERSION).await { + if let Ok(bytes) = backend + .metadata_at_version(V15_METADATA_VERSION, block_hash) + .await + { return Ok(bytes); } // If that fails, fetch the metadata V14 using the old API. - rpc.metadata().await + backend.legacy_metadata(block_hash).await } /// Create an object which can be used to keep the runtime up to date @@ -258,8 +288,8 @@ impl OnlineClient { } /// Return an RPC client to make raw requests with. - pub fn rpc(&self) -> &Rpc { - &self.rpc + pub fn backend(&self) -> &dyn Backend { + &*self.backend } /// Return an offline client with the same configuration as this. @@ -324,8 +354,8 @@ impl OfflineClientT for OnlineClient { } impl OnlineClientT for OnlineClient { - fn rpc(&self) -> &Rpc { - &self.rpc + fn backend(&self) -> &dyn Backend { + &*self.backend } } @@ -383,7 +413,7 @@ impl ClientRuntimeUpdater { /// Instead that's up to the user of this API to decide when to update and /// to perform the actual updating. 
pub async fn runtime_updates(&self) -> Result, Error> { - let stream = self.0.rpc().subscribe_runtime_version().await?; + let stream = self.0.backend().stream_runtime_version().await?; Ok(RuntimeUpdaterStream { stream, client: self.0.clone(), @@ -393,12 +423,12 @@ impl ClientRuntimeUpdater { /// Stream to perform runtime upgrades. pub struct RuntimeUpdaterStream { - stream: Subscription, + stream: StreamOfResults, client: OnlineClient, } impl RuntimeUpdaterStream { - /// Get the next element of the stream. + /// Wait for the next runtime update. pub async fn next(&mut self) -> Option> { let maybe_runtime_version = self.stream.next().await?; @@ -407,7 +437,17 @@ impl RuntimeUpdaterStream { Err(err) => return Some(Err(err)), }; - let metadata = match self.client.rpc().metadata().await { + let latest_block_ref = match self.client.backend().latest_best_block_ref().await { + Ok(block_ref) => block_ref, + Err(e) => return Some(Err(e)), + }; + + let metadata = match OnlineClient::fetch_metadata( + self.client.backend(), + latest_block_ref.hash(), + ) + .await + { Ok(metadata) => metadata, Err(err) => return Some(Err(err)), }; @@ -444,53 +484,3 @@ impl Update { &self.metadata } } - -// helpers for a jsonrpsee specific OnlineClient. 
-#[cfg(all(feature = "jsonrpsee", feature = "native"))] -mod jsonrpsee_helpers { - pub use jsonrpsee::{ - client_transport::ws::{Receiver, Sender, Url, WsTransportClientBuilder}, - core::{ - client::{Client, ClientBuilder}, - Error, - }, - }; - - /// Build WS RPC client from URL - pub async fn client(url: &str) -> Result { - let (sender, receiver) = ws_transport(url).await?; - Ok(Client::builder() - .max_buffer_capacity_per_subscription(4096) - .build_with_tokio(sender, receiver)) - } - - async fn ws_transport(url: &str) -> Result<(Sender, Receiver), Error> { - let url = Url::parse(url).map_err(|e| Error::Transport(e.into()))?; - WsTransportClientBuilder::default() - .build(url) - .await - .map_err(|e| Error::Transport(e.into())) - } -} - -// helpers for a jsonrpsee specific OnlineClient. -#[cfg(all(feature = "jsonrpsee", feature = "web", target_arch = "wasm32"))] -mod jsonrpsee_helpers { - pub use jsonrpsee::{ - client_transport::web, - core::{ - client::{Client, ClientBuilder}, - Error, - }, - }; - - /// Build web RPC client from URL - pub async fn client(url: &str) -> Result { - let (sender, receiver) = web::connect(url) - .await - .map_err(|e| Error::Transport(e.into()))?; - Ok(ClientBuilder::default() - .max_buffer_capacity_per_subscription(4096) - .build_with_wasm(sender, receiver)) - } -} diff --git a/subxt/src/config/mod.rs b/subxt/src/config/mod.rs index 7638ddd167..a3ee59f91a 100644 --- a/subxt/src/config/mod.rs +++ b/subxt/src/config/mod.rs @@ -26,10 +26,11 @@ pub use signed_extensions::SignedExtension; pub use substrate::{SubstrateConfig, SubstrateExtrinsicParams, SubstrateExtrinsicParamsBuilder}; /// Runtime types. -// Note: the 'static bound isn't strictly required, but currently deriving TypeInfo -// automatically applies a 'static bound to all generic types (including this one), -// and so until that is resolved, we'll keep the (easy to satisfy) constraint here. 
-pub trait Config: Sized + 'static { +// Note: the `Send + Sync + 'static` bound isn't strictly required, but currently deriving +// TypeInfo automatically applies a 'static bound to all generic types (including this one), +// And we want the compiler to infer `Send` and `Sync` OK for things which have `T: Config` +// rather than having to `unsafe impl` them ourselves. +pub trait Config: Sized + Send + Sync + 'static { /// The output of the `Hasher` function. type Hash: Debug + Copy diff --git a/subxt/src/config/substrate.rs b/subxt/src/config/substrate.rs index 29bff1f03d..aed2b67660 100644 --- a/subxt/src/config/substrate.rs +++ b/subxt/src/config/substrate.rs @@ -224,7 +224,7 @@ where { // At the time of writing, Smoldot gives back block numbers in numeric rather // than hex format. So let's support deserializing from both here: - use crate::rpc::types::NumberOrHex; + use crate::backend::legacy::rpc_methods::NumberOrHex; let number_or_hex = NumberOrHex::deserialize(d)?; let u256 = number_or_hex.into_u256(); TryFrom::try_from(u256).map_err(|_| serde::de::Error::custom("Try from failed")) diff --git a/subxt/src/custom_values/custom_values_client.rs b/subxt/src/custom_values/custom_values_client.rs index 04e3903ba5..3c1f5988b2 100644 --- a/subxt/src/custom_values/custom_values_client.rs +++ b/subxt/src/custom_values/custom_values_client.rs @@ -47,15 +47,14 @@ impl> CustomValuesClient { #[cfg(test)] mod tests { - use std::collections::BTreeMap; - + use crate::backend::RuntimeVersion; use crate::custom_values::CustomValuesClient; - use crate::rpc::types::RuntimeVersion; use crate::{Metadata, OfflineClient, SubstrateConfig}; + use codec::Encode; use scale_decode::DecodeAsType; use scale_info::form::PortableForm; use scale_info::TypeInfo; - use sp_core::Encode; + use std::collections::BTreeMap; #[derive(Debug, Clone, PartialEq, Eq, Encode, TypeInfo, DecodeAsType)] pub struct Person { @@ -116,7 +115,6 @@ mod tests { RuntimeVersion { spec_version: 0, transaction_version: 
0, - other: Default::default(), }, mock_metadata(), ); diff --git a/subxt/src/error/mod.rs b/subxt/src/error/mod.rs index 2c40e09373..527766f53d 100644 --- a/subxt/src/error/mod.rs +++ b/subxt/src/error/mod.rs @@ -144,23 +144,19 @@ impl BlockError { #[derive(Clone, Debug, Eq, thiserror::Error, PartialEq)] #[non_exhaustive] pub enum TransactionError { - /// The finality subscription expired (after ~512 blocks we give up if the - /// block hasn't yet been finalized). - #[error("The finality subscription expired")] - FinalityTimeout, /// The block hash that the transaction was added to could not be found. /// This is probably because the block was retracted before being finalized. #[error("The block containing the transaction can no longer be found (perhaps it was on a non-finalized fork?)")] BlockNotFound, - /// The transaction was deemed invalid in the current chain state. - #[error("The transaction is no longer valid")] - Invalid, - /// The transaction was replaced by a transaction with the same (sender, nonce) pair but with higher priority - #[error("The transaction was replaced by a transaction with the same (sender, nonce) pair but with higher priority.")] - Usurped, - /// The transaction was dropped because of some limit - #[error("The transaction was dropped from the pool because of a limit.")] - Dropped, + /// An error happened on the node that the transaction was submitted to. + #[error("Error handling transaction: {0}")] + Error(String), + /// The transaction was deemed invalid. + #[error("The transaction is not valid: {0}")] + Invalid(String), + /// The transaction was dropped. + #[error("The transaction was dropped: {0}")] + Dropped(String), } /// Something went wrong trying to encode a storage address. 
diff --git a/subxt/src/events/events_client.rs b/subxt/src/events/events_client.rs index 0a30f2dc6a..1d750ed522 100644 --- a/subxt/src/events/events_client.rs +++ b/subxt/src/events/events_client.rs @@ -2,7 +2,8 @@ // This file is dual-licensed as Apache-2.0 or GPL-3.0. // see LICENSE for license details. -use crate::{client::OnlineClientT, error::Error, events::Events, rpc::types::StorageKey, Config}; +use crate::backend::{Backend, BackendExt, BlockRef}; +use crate::{client::OnlineClientT, error::Error, events::Events, Config}; use derivative::Derivative; use std::future::Future; @@ -38,12 +39,12 @@ where /// but may run into errors attempting to work with them. pub fn at( &self, - block_hash: T::Hash, + block_ref: impl Into>, ) -> impl Future, Error>> + Send + 'static { - self.at_or_latest(Some(block_hash)) + self.at_or_latest(Some(block_ref.into())) } - /// Obtain events at the latest block hash. + /// Obtain events for the latest block. pub fn at_latest(&self) -> impl Future, Error>> + Send + 'static { self.at_or_latest(None) } @@ -51,49 +52,45 @@ where /// Obtain events at some block hash. fn at_or_latest( &self, - block_hash: Option, + block_ref: Option>, ) -> impl Future, Error>> + Send + 'static { // Clone and pass the client in like this so that we can explicitly // return a Future that's Send + 'static, rather than tied to &self. let client = self.client.clone(); async move { - // If block hash is not provided, get the hash - // for the latest block and use that. - let block_hash = match block_hash { - Some(hash) => hash, - None => client - .rpc() - .block_hash(None) - .await? - .expect("didn't pass a block number; qed"), + // If a block ref isn't provided, we'll get the latest best block to use. 
+ let block_ref = match block_ref { + Some(r) => r, + None => client.backend().latest_best_block_ref().await?, }; - let event_bytes = get_event_bytes(&client, Some(block_hash)).await?; - Ok(Events::new(client.metadata(), block_hash, event_bytes)) + let event_bytes = get_event_bytes(client.backend(), block_ref.hash()).await?; + Ok(Events::new( + client.metadata(), + block_ref.hash(), + event_bytes, + )) } } } // The storage key needed to access events. -fn system_events_key() -> StorageKey { - let mut storage_key = sp_core_hashing::twox_128(b"System").to_vec(); - storage_key.extend(sp_core_hashing::twox_128(b"Events").to_vec()); - StorageKey(storage_key) +fn system_events_key() -> [u8; 32] { + let a = sp_core_hashing::twox_128(b"System"); + let b = sp_core_hashing::twox_128(b"Events"); + let mut res = [0; 32]; + res[0..16].clone_from_slice(&a); + res[16..32].clone_from_slice(&b); + res } // Get the event bytes from the provided client, at the provided block hash. -pub(crate) async fn get_event_bytes( - client: &Client, - block_hash: Option, -) -> Result, Error> -where - T: Config, - Client: OnlineClientT, -{ - Ok(client - .rpc() - .storage(&system_events_key().0, block_hash) +pub(crate) async fn get_event_bytes( + backend: &dyn Backend, + block_hash: T::Hash, +) -> Result, Error> { + Ok(backend + .storage_fetch_value(system_events_key().to_vec(), block_hash) .await? - .map(|e| e.0) - .unwrap_or_else(Vec::new)) + .unwrap_or_default()) } diff --git a/subxt/src/events/events_type.rs b/subxt/src/events/events_type.rs index 97cd892969..831268ad00 100644 --- a/subxt/src/events/events_type.rs +++ b/subxt/src/events/events_type.rs @@ -69,36 +69,13 @@ impl Events { /// Obtain the events from a block hash given custom metadata and a client. /// - /// This method gives users the ability to inspect the events of older blocks, - /// where the metadata changed. For those cases, the user is responsible for - /// providing a valid metadata. 
+ /// # Notes /// - /// # Example - /// - /// ```no_run - /// # #[tokio::main] - /// # async fn main() -> Result<(), Box> { - /// use subxt::{ OnlineClient, PolkadotConfig, events::Events }; - /// - /// let client = OnlineClient::::new().await.unwrap(); - /// - /// // Get the hash of an older block. - /// let block_hash = client - /// .rpc() - /// .block_hash(Some(1u32.into())) - /// .await? - /// .expect("didn't pass a block number; qed"); - /// // Fetch the metadata of the given block. - /// let metadata = client.rpc().metadata_legacy(Some(block_hash)).await?; - /// // Fetch the events from the client. - /// let events = Events::new_from_client(metadata, block_hash, client); - /// # Ok(()) - /// # } - /// ``` - /// - /// # Note - /// - /// Prefer to use [`crate::events::EventsClient::at`] to obtain the events. + /// - Prefer to use [`crate::events::EventsClient::at`] to obtain the events. + /// - Subxt may fail to decode things that aren't from a runtime using the + /// latest metadata version. + /// - The client may not be able to obtain the block at the given hash. Only + /// archive nodes keep hold of all past block information. 
pub async fn new_from_client( metadata: Metadata, block_hash: T::Hash, @@ -107,7 +84,7 @@ impl Events { where Client: OnlineClientT, { - let event_bytes = get_event_bytes(&client, Some(block_hash)).await?; + let event_bytes = get_event_bytes(client.backend(), block_hash).await?; Ok(Events::new(metadata, block_hash, event_bytes)) } @@ -669,7 +646,7 @@ mod tests { // construst an Events object to iterate them: let event = Event::A(1, true, vec!["Hi".into()]); let events = events::( - metadata.clone(), + metadata, vec![event_record(Phase::ApplyExtrinsic(123), event)], ); @@ -711,7 +688,7 @@ mod tests { let event3 = Event::A(234); let events = events::( - metadata.clone(), + metadata, vec![ event_record(Phase::Initialization, event1), event_record(Phase::ApplyExtrinsic(123), event2), @@ -782,7 +759,7 @@ mod tests { // Encode our events in the format we expect back from a node, and // construst an Events object to iterate them: let events = events_raw( - metadata.clone(), + metadata, event_bytes, 3, // 2 "good" events, and then it'll hit the naff bytes. 
); @@ -833,7 +810,7 @@ mod tests { // Encode our events in the format we expect back from a node, and // construst an Events object to iterate them: let events = events::( - metadata.clone(), + metadata, vec![event_record(Phase::Finalization, Event::A(1))], ); @@ -870,7 +847,7 @@ mod tests { // Encode our events in the format we expect back from a node, and // construct an Events object to iterate them: let events = events::( - metadata.clone(), + metadata, vec![event_record( Phase::Finalization, Event::A(CompactWrapper(1)), @@ -914,7 +891,7 @@ mod tests { // Encode our events in the format we expect back from a node, and // construct an Events object to iterate them: let events = events::( - metadata.clone(), + metadata, vec![event_record(Phase::Finalization, Event::A(MyType::B))], ); diff --git a/subxt/src/lib.rs b/subxt/src/lib.rs index 95dd4bd7ee..ae60494f8d 100644 --- a/subxt/src/lib.rs +++ b/subxt/src/lib.rs @@ -63,6 +63,7 @@ use tracing_subscriber as _; #[allow(unused_imports)] pub use getrandom as _; +pub mod backend; pub mod blocks; pub mod client; pub mod config; @@ -72,7 +73,6 @@ pub mod dynamic; pub mod error; pub mod events; pub mod metadata; -pub mod rpc; pub mod runtime_api; pub mod storage; pub mod tx; @@ -91,6 +91,7 @@ pub use crate::{ pub mod ext { pub use codec; pub use frame_metadata; + pub use futures; pub use scale_bits; pub use scale_decode; pub use scale_encode; diff --git a/subxt/src/rpc/mod.rs b/subxt/src/rpc/mod.rs deleted file mode 100644 index 2254db313c..0000000000 --- a/subxt/src/rpc/mod.rs +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2019-2023 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! RPC types and client for interacting with a substrate node. -//! -//! These is used behind the scenes by various `subxt` APIs, but can -//! also be used directly. -//! -//! - [`Rpc`] is the highest level wrapper, and the one you will run into -//! first. 
It contains the higher level methods for interacting with a node. -//! - [`RpcClient`] is what [`Rpc`] uses to actually talk to a node, offering -//! a [`RpcClient::request`] and [`RpcClient::subscribe`] method to do so. -//! - [`RpcClientT`] is the underlying dynamic RPC implementation. This provides -//! the low level [`RpcClientT::request_raw`] and [`RpcClientT::subscribe_raw`] -//! methods. This can be swapped out for a custom implementation, but by default -//! we'll rely on `jsonrpsee` for this. -//! -//! # Example -//! -//! Fetching storage keys -//! -//! ```no_run -//! # #[tokio::main] -//! # async fn main() { -//! use subxt::{ PolkadotConfig, OnlineClient, storage::StorageKey }; -//! -//! #[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale")] -//! pub mod polkadot {} -//! -//! let api = OnlineClient::::new().await.unwrap(); -//! -//! let genesis_hash = api -//! .rpc() -//! .genesis_hash() -//! .await -//! .unwrap(); -//! -//! println!("{genesis_hash}"); -//! # } -//! ``` - -// Allow an `rpc.rs` file in the `rpc` folder to align better -// with other file names for their types. -#![allow(clippy::module_inception)] - -#[cfg(feature = "jsonrpsee")] -mod jsonrpsee_impl; - -mod rpc; -mod rpc_client; -mod rpc_client_t; - -// Expose our RPC types here. -pub mod types; - -// Expose the `Rpc` struct. -pub use rpc::*; - -pub use rpc_client_t::{ - RawValue, RpcClientT, RpcFuture, RpcSubscription, RpcSubscriptionId, RpcSubscriptionStream, -}; - -pub use rpc_client::{rpc_params, RpcClient, RpcParams, Subscription}; diff --git a/subxt/src/rpc/rpc.rs b/subxt/src/rpc/rpc.rs deleted file mode 100644 index 1d2be6e5bd..0000000000 --- a/subxt/src/rpc/rpc.rs +++ /dev/null @@ -1,645 +0,0 @@ -// Copyright 2019-2023 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! RPC types and client for interacting with a substrate node. -//! -//! 
This is used behind the scenes by various `subxt` APIs, but can -//! also be used directly. -//! -//! # Example -//! -//! Fetching the chain genesis hash. -//! -//! ```no_run -//! # #[tokio::main] -//! # async fn main() { -//! use subxt::{ PolkadotConfig, OnlineClient, storage::StorageKey }; -//! -//! #[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale")] -//! pub mod polkadot {} -//! -//! let api = OnlineClient::::new().await.unwrap(); -//! -//! let genesis_hash = api -//! .rpc() -//! .genesis_hash() -//! .await -//! .unwrap(); -//! -//! println!("{genesis_hash}"); -//! # } -//! ``` - -use std::sync::Arc; - -use codec::{Decode, Encode}; - -use crate::{error::Error, utils::PhantomDataSendSync, Config, Metadata}; - -use super::{ - rpc_params, - types::{self, FollowEvent, StorageQuery}, - RpcClient, RpcClientT, Subscription, -}; - -/// Client for substrate rpc interfaces -pub struct Rpc { - client: RpcClient, - _marker: PhantomDataSendSync, -} - -impl Clone for Rpc { - fn clone(&self) -> Self { - Self { - client: self.client.clone(), - _marker: PhantomDataSendSync::new(), - } - } -} - -// Expose subscribe/request, and also subscribe_raw/request_raw -// from the even-deeper `dyn RpcClientT` impl. -impl std::ops::Deref for Rpc { - type Target = RpcClient; - fn deref(&self) -> &Self::Target { - &self.client - } -} - -impl Rpc { - /// Create a new [`Rpc`] - pub fn new(client: Arc) -> Self { - Self { - client: RpcClient::new(client), - _marker: PhantomDataSendSync::new(), - } - } - - /// Fetch the raw bytes for a given storage key - pub async fn storage( - &self, - key: &[u8], - hash: Option, - ) -> Result, Error> { - let params = rpc_params![to_hex(key), hash]; - let data = self.client.request("state_getStorage", params).await?; - Ok(data) - } - - /// Returns the keys with prefix with pagination support. - /// Up to `count` keys will be returned. - /// If `start_key` is passed, return next keys in storage in lexicographic order. 
- pub async fn storage_keys_paged( - &self, - key: &[u8], - count: u32, - start_key: Option<&[u8]>, - hash: Option, - ) -> Result, Error> { - let start_key = start_key.map(to_hex); - let params = rpc_params![to_hex(key), count, start_key, hash]; - let data = self.client.request("state_getKeysPaged", params).await?; - Ok(data) - } - - /// Query historical storage entries - pub async fn query_storage( - &self, - keys: impl IntoIterator, - from: T::Hash, - to: Option, - ) -> Result>, Error> { - let keys: Vec = keys.into_iter().map(to_hex).collect(); - let params = rpc_params![keys, from, to]; - self.client - .request("state_queryStorage", params) - .await - .map_err(Into::into) - } - - /// Query historical storage entries - pub async fn query_storage_at( - &self, - keys: impl IntoIterator, - at: Option, - ) -> Result>, Error> { - let keys: Vec = keys.into_iter().map(to_hex).collect(); - let params = rpc_params![keys, at]; - self.client - .request("state_queryStorageAt", params) - .await - .map_err(Into::into) - } - - /// Fetch the genesis hash - pub async fn genesis_hash(&self) -> Result { - let block_zero = 0u32; - let params = rpc_params![block_zero]; - let genesis_hash: Option = - self.client.request("chain_getBlockHash", params).await?; - genesis_hash.ok_or_else(|| "Genesis hash not found".into()) - } - - /// Fetch the metadata via the legacy `state_getMetadata` RPC method. 
- pub async fn metadata_legacy(&self, at: Option) -> Result { - let bytes: types::Bytes = self - .client - .request("state_getMetadata", rpc_params![at]) - .await?; - let metadata = Metadata::decode(&mut &bytes[..])?; - Ok(metadata) - } - - /// Fetch system properties - pub async fn system_properties(&self) -> Result { - self.client - .request("system_properties", rpc_params![]) - .await - } - - /// Fetch system health - pub async fn system_health(&self) -> Result { - self.client.request("system_health", rpc_params![]).await - } - - /// Fetch system chain - pub async fn system_chain(&self) -> Result { - self.client.request("system_chain", rpc_params![]).await - } - - /// Fetch system name - pub async fn system_name(&self) -> Result { - self.client.request("system_name", rpc_params![]).await - } - - /// Fetch system version - pub async fn system_version(&self) -> Result { - self.client.request("system_version", rpc_params![]).await - } - - /// Get a header - pub async fn header(&self, hash: Option) -> Result, Error> { - let params = rpc_params![hash]; - let header = self.client.request("chain_getHeader", params).await?; - Ok(header) - } - - /// Get a block hash, returns hash of latest block by default - pub async fn block_hash( - &self, - block_number: Option, - ) -> Result, Error> { - let params = rpc_params![block_number]; - let block_hash = self.client.request("chain_getBlockHash", params).await?; - Ok(block_hash) - } - - /// Get a block hash of the latest finalized block - pub async fn finalized_head(&self) -> Result { - let hash = self - .client - .request("chain_getFinalizedHead", rpc_params![]) - .await?; - Ok(hash) - } - - /// Get a Block - pub async fn block( - &self, - hash: Option, - ) -> Result>, Error> { - let params = rpc_params![hash]; - let block = self.client.request("chain_getBlock", params).await?; - Ok(block) - } - - /// Reexecute the specified `block_hash` and gather statistics while doing so. 
- /// - /// This function requires the specified block and its parent to be available - /// at the queried node. If either the specified block or the parent is pruned, - /// this function will return `None`. - pub async fn block_stats( - &self, - block_hash: T::Hash, - ) -> Result, Error> { - let params = rpc_params![block_hash]; - let stats = self.client.request("dev_getBlockStats", params).await?; - Ok(stats) - } - - /// Get proof of storage entries at a specific block's state. - pub async fn read_proof( - &self, - keys: impl IntoIterator, - hash: Option, - ) -> Result, Error> { - let keys: Vec = keys.into_iter().map(to_hex).collect(); - let params = rpc_params![keys, hash]; - let proof = self.client.request("state_getReadProof", params).await?; - Ok(proof) - } - - /// Fetch the runtime version - pub async fn runtime_version( - &self, - at: Option, - ) -> Result { - let params = rpc_params![at]; - let version = self - .client - .request("state_getRuntimeVersion", params) - .await?; - Ok(version) - } - - /// Subscribe to all new best block headers. - pub async fn subscribe_best_block_headers(&self) -> Result, Error> { - let subscription = self - .client - .subscribe( - // Despite the name, this returns a stream of all new blocks - // imported by the node that happen to be added to the current best chain - // (ie all best blocks). - "chain_subscribeNewHeads", - rpc_params![], - "chain_unsubscribeNewHeads", - ) - .await?; - - Ok(subscription) - } - - /// Subscribe to all new block headers. - pub async fn subscribe_all_block_headers(&self) -> Result, Error> { - let subscription = self - .client - .subscribe( - // Despite the name, this returns a stream of all new blocks - // imported by the node that happen to be added to the current best chain - // (ie all best blocks). - "chain_subscribeAllHeads", - rpc_params![], - "chain_unsubscribeAllHeads", - ) - .await?; - - Ok(subscription) - } - - /// Subscribe to finalized block headers. 
- /// - /// Note: this may not produce _every_ block in the finalized chain; - /// sometimes multiple blocks are finalized at once, and in this case only the - /// latest one is returned. the higher level APIs that use this "fill in" the - /// gaps for us. - pub async fn subscribe_finalized_block_headers( - &self, - ) -> Result, Error> { - let subscription = self - .client - .subscribe( - "chain_subscribeFinalizedHeads", - rpc_params![], - "chain_unsubscribeFinalizedHeads", - ) - .await?; - Ok(subscription) - } - - /// Subscribe to runtime version updates that produce changes in the metadata. - /// The first item emitted by the stream is the current runtime version. - pub async fn subscribe_runtime_version( - &self, - ) -> Result, Error> { - let subscription = self - .client - .subscribe( - "state_subscribeRuntimeVersion", - rpc_params![], - "state_unsubscribeRuntimeVersion", - ) - .await?; - Ok(subscription) - } - - /// Create and submit an extrinsic and return corresponding Hash if successful - pub async fn submit_extrinsic(&self, extrinsic: X) -> Result { - let bytes: types::Bytes = extrinsic.encode().into(); - let params = rpc_params![bytes]; - let xt_hash = self - .client - .request("author_submitExtrinsic", params) - .await?; - Ok(xt_hash) - } - - /// Execute a runtime API call via `state_call` RPC method. - pub async fn state_call_raw( - &self, - function: &str, - call_parameters: Option<&[u8]>, - at: Option, - ) -> Result { - let call_parameters = call_parameters.unwrap_or_default(); - let bytes: types::Bytes = self - .client - .request( - "state_call", - rpc_params![function, to_hex(call_parameters), at], - ) - .await?; - Ok(bytes) - } - - /// Execute a runtime API call and decode the result. 
- pub async fn state_call( - &self, - function: &str, - call_parameters: Option<&[u8]>, - at: Option, - ) -> Result { - let bytes = self.state_call_raw(function, call_parameters, at).await?; - let cursor = &mut &bytes[..]; - let res: Res = Decode::decode(cursor)?; - Ok(res) - } - - /// Provide a list of the supported metadata versions of the node. - pub async fn metadata_versions(&self) -> Result, Error> { - let versions = self - .state_call("Metadata_metadata_versions", None, None) - .await?; - - Ok(versions) - } - - /// Execute runtime API call and return the specified runtime metadata version. - pub async fn metadata_at_version(&self, version: u32) -> Result { - let param = version.encode(); - let opaque: Option = self - .state_call("Metadata_metadata_at_version", Some(¶m), None) - .await?; - - let bytes = opaque.ok_or(Error::Other("Metadata version not found".into()))?; - - let metadata: Metadata = Decode::decode(&mut &bytes.0[..])?; - Ok(metadata) - } - - /// Execute a runtime API call into `Metadata_metadata` method - /// to fetch the latest available metadata. - /// - /// # Note - /// - /// This returns the same output as [`Self::metadata`], but calls directly - /// into the runtime. - pub async fn metadata(&self) -> Result { - let bytes: frame_metadata::OpaqueMetadata = - self.state_call("Metadata_metadata", None, None).await?; - - let metadata: Metadata = Decode::decode(&mut &bytes.0[..])?; - Ok(metadata) - } - - /// Create and submit an extrinsic and return a subscription to the events triggered. - pub async fn watch_extrinsic( - &self, - extrinsic: X, - ) -> Result>, Error> { - let bytes: types::Bytes = extrinsic.encode().into(); - let params = rpc_params![bytes]; - let subscription = self - .client - .subscribe( - "author_submitAndWatchExtrinsic", - params, - "author_unwatchExtrinsic", - ) - .await?; - Ok(subscription) - } - - /// Insert a key into the keystore. 
- pub async fn insert_key( - &self, - key_type: String, - suri: String, - public: types::Bytes, - ) -> Result<(), Error> { - let params = rpc_params![key_type, suri, public]; - self.client.request("author_insertKey", params).await?; - Ok(()) - } - - /// Generate new session keys and returns the corresponding public keys. - pub async fn rotate_keys(&self) -> Result { - self.client - .request("author_rotateKeys", rpc_params![]) - .await - } - - /// Checks if the keystore has private keys for the given session public keys. - /// - /// `session_keys` is the SCALE encoded session keys object from the runtime. - /// - /// Returns `true` iff all private keys could be found. - pub async fn has_session_keys(&self, session_keys: types::Bytes) -> Result { - let params = rpc_params![session_keys]; - self.client.request("author_hasSessionKeys", params).await - } - - /// Checks if the keystore has private keys for the given public key and key type. - /// - /// Returns `true` if a private key could be found. - pub async fn has_key(&self, public_key: types::Bytes, key_type: String) -> Result { - let params = rpc_params![public_key, key_type]; - self.client.request("author_hasKey", params).await - } - - /// Submits the extrinsic to the dry_run RPC, to test if it would succeed. - /// - /// Returns a [`types::DryRunResult`], which is the result of performing the dry run. - pub async fn dry_run( - &self, - encoded_signed: &[u8], - at: Option, - ) -> Result { - let params = rpc_params![to_hex(encoded_signed), at]; - let result_bytes: types::Bytes = self.client.request("system_dryRun", params).await?; - Ok(types::DryRunResultBytes(result_bytes.0)) - } - - /// Subscribe to `chainHead_unstable_follow` to obtain all reported blocks by the chain. 
- /// - /// The subscription ID can be used to make queries for the - /// block's body ([`chainhead_unstable_body`](Rpc::chainhead_unstable_follow)), - /// block's header ([`chainhead_unstable_header`](Rpc::chainhead_unstable_header)), - /// block's storage ([`chainhead_unstable_storage`](Rpc::chainhead_unstable_storage)) and submitting - /// runtime API calls at this block ([`chainhead_unstable_call`](Rpc::chainhead_unstable_call)). - /// - /// # Note - /// - /// When the user is no longer interested in a block, the user is responsible - /// for calling the [`chainhead_unstable_unpin`](Rpc::chainhead_unstable_unpin) method. - /// Failure to do so will result in the subscription being stopped by generating the `Stop` event. - pub async fn chainhead_unstable_follow( - &self, - runtime_updates: bool, - ) -> Result>, Error> { - let subscription = self - .client - .subscribe( - "chainHead_unstable_follow", - rpc_params![runtime_updates], - "chainHead_unstable_unfollow", - ) - .await?; - - Ok(subscription) - } - - /// Call the `chainHead_unstable_body` method and return an operation ID to obtain the block's body. - /// - /// The response events are provided on the `chainHead_follow` subscription and identified by - /// the returned operation ID. - /// - /// # Note - /// - /// The subscription ID is obtained from an open subscription created by - /// [`chainhead_unstable_follow`](Rpc::chainhead_unstable_follow). - pub async fn chainhead_unstable_body( - &self, - subscription_id: String, - hash: T::Hash, - ) -> Result { - let response = self - .client - .request( - "chainHead_unstable_body", - rpc_params![subscription_id, hash], - ) - .await?; - - Ok(response) - } - - /// Get the block's body using the `chainHead_unstable_header` method. - /// - /// # Note - /// - /// The subscription ID is obtained from an open subscription created by - /// [`chainhead_unstable_follow`](Rpc::chainhead_unstable_follow). 
- pub async fn chainhead_unstable_header( - &self, - subscription_id: String, - hash: T::Hash, - ) -> Result, Error> { - let header = self - .client - .request( - "chainHead_unstable_header", - rpc_params![subscription_id, hash], - ) - .await?; - - Ok(header) - } - - /// Call the `chainhead_unstable_storage` method and return an operation ID to obtain the block's storage. - /// - /// The response events are provided on the `chainHead_follow` subscription and identified by - /// the returned operation ID. - /// - /// # Note - /// - /// The subscription ID is obtained from an open subscription created by - /// [`chainhead_unstable_follow`](Rpc::chainhead_unstable_follow). - pub async fn chainhead_unstable_storage( - &self, - subscription_id: String, - hash: T::Hash, - items: Vec>, - child_key: Option<&[u8]>, - ) -> Result { - let items: Vec> = items - .into_iter() - .map(|item| StorageQuery { - key: to_hex(item.key), - query_type: item.query_type, - }) - .collect(); - - let response = self - .client - .request( - "chainHead_unstable_storage", - rpc_params![subscription_id, hash, items, child_key.map(to_hex)], - ) - .await?; - - Ok(response) - } - - /// Call the `chainhead_unstable_storage` method and return an operation ID to obtain the runtime API result. - /// - /// The response events are provided on the `chainHead_follow` subscription and identified by - /// the returned operation ID. - /// - /// # Note - /// - /// The subscription ID is obtained from an open subscription created by - /// [`chainhead_unstable_follow`](Rpc::chainhead_unstable_follow). - pub async fn chainhead_unstable_call( - &self, - subscription_id: String, - hash: T::Hash, - function: String, - call_parameters: &[u8], - ) -> Result { - let response = self - .client - .request( - "chainHead_unstable_call", - rpc_params![subscription_id, hash, function, to_hex(call_parameters)], - ) - .await?; - - Ok(response) - } - - /// Unpin a block reported by the `chainHead_follow` subscription. 
- /// - /// # Note - /// - /// The subscription ID is obtained from an open subscription created by - /// [`chainhead_unstable_follow`](Rpc::chainhead_unstable_follow). - pub async fn chainhead_unstable_unpin( - &self, - subscription_id: String, - hash: T::Hash, - ) -> Result<(), Error> { - self.client - .request( - "chainHead_unstable_unpin", - rpc_params![subscription_id, hash], - ) - .await?; - - Ok(()) - } - - /// Get genesis hash obtained from the `chainHead_genesisHash` method. - pub async fn chainhead_unstable_genesishash(&self) -> Result { - let hash = self - .client - .request("chainHead_unstable_genesisHash", rpc_params![]) - .await?; - - Ok(hash) - } -} - -fn to_hex(bytes: impl AsRef<[u8]>) -> String { - format!("0x{}", hex::encode(bytes.as_ref())) -} diff --git a/subxt/src/rpc/types.rs b/subxt/src/rpc/types.rs deleted file mode 100644 index 587199fb11..0000000000 --- a/subxt/src/rpc/types.rs +++ /dev/null @@ -1,970 +0,0 @@ -// Copyright 2019-2023 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! Types sent to/from the Substrate RPC interface. - -use crate::{metadata::Metadata, Config}; -use codec::{Decode, Encode}; -use primitive_types::U256; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; - -// Subscription types are returned from some calls, so expose it with the rest of the returned types. -pub use super::rpc_client::Subscription; - -/// An error dry running an extrinsic. -#[derive(Debug, PartialEq, Eq)] -pub enum DryRunResult { - /// The transaction could be included in the block and executed. - Success, - /// The transaction could be included in the block, but the call failed to dispatch. - DispatchError(crate::error::DispatchError), - /// The transaction could not be included in the block. - TransactionValidityError, -} - -/// The bytes representing an error dry running an extrinsic. 
-pub struct DryRunResultBytes(pub Vec); - -impl DryRunResultBytes { - /// Attempt to decode the error bytes into a [`DryRunResult`] using the provided [`Metadata`]. - pub fn into_dry_run_result(self, metadata: &Metadata) -> Result { - // dryRun returns an ApplyExtrinsicResult, which is basically a - // `Result, TransactionValidityError>`. - let bytes = self.0; - if bytes[0] == 0 && bytes[1] == 0 { - // Ok(Ok(())); transaction is valid and executed ok - Ok(DryRunResult::Success) - } else if bytes[0] == 0 && bytes[1] == 1 { - // Ok(Err(dispatch_error)); transaction is valid but execution failed - let dispatch_error = - crate::error::DispatchError::decode_from(&bytes[2..], metadata.clone())?; - Ok(DryRunResult::DispatchError(dispatch_error)) - } else if bytes[0] == 1 { - // Err(transaction_error); some transaction validity error (we ignore the details at the moment) - Ok(DryRunResult::TransactionValidityError) - } else { - // unable to decode the bytes; they aren't what we expect. - Err(crate::Error::Unknown(bytes)) - } - } -} - -/// A number type that can be serialized both as a number or a string that encodes a number in a -/// string. -/// -/// We allow two representations of the block number as input. Either we deserialize to the type -/// that is specified in the block type or we attempt to parse given hex value. -/// -/// The primary motivation for having this type is to avoid overflows when using big integers in -/// JavaScript (which we consider as an important RPC API consumer). -#[derive(Copy, Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] -#[serde(untagged)] -pub enum NumberOrHex { - /// The number represented directly. - Number(u64), - /// Hex representation of the number. - Hex(U256), -} - -/// Hex-serialized shim for `Vec`. 
-#[derive(PartialEq, Eq, Clone, Serialize, Deserialize, Hash, PartialOrd, Ord, Debug)] -pub struct Bytes(#[serde(with = "impl_serde::serialize")] pub Vec); -impl std::ops::Deref for Bytes { - type Target = [u8]; - fn deref(&self) -> &[u8] { - &self.0[..] - } -} -impl From> for Bytes { - fn from(s: Vec) -> Self { - Bytes(s) - } -} - -/// The response from `chain_getBlock` -#[derive(Debug, Deserialize)] -#[serde(bound = "T: Config")] -pub struct ChainBlockResponse { - /// The block itself. - pub block: ChainBlock, - /// Block justification. - pub justifications: Option>, -} - -/// Block details in the [`ChainBlockResponse`]. -#[derive(Debug, Deserialize)] -pub struct ChainBlock { - /// The block header. - pub header: T::Header, - /// The accompanying extrinsics. - pub extrinsics: Vec, -} - -/// An abstraction over justification for a block's validity under a consensus algorithm. -pub type Justification = (ConsensusEngineId, EncodedJustification); -/// Consensus engine unique ID. -pub type ConsensusEngineId = [u8; 4]; -/// The encoded justification specific to a consensus engine. -pub type EncodedJustification = Vec; - -/// Bytes representing an extrinsic in a [`ChainBlock`]. -#[derive(Clone, Debug, Deserialize)] -pub struct ChainBlockExtrinsic(#[serde(with = "impl_serde::serialize")] pub Vec); - -/// Wrapper for NumberOrHex to allow custom From impls -#[derive(Serialize)] -pub struct BlockNumber(NumberOrHex); - -impl From for BlockNumber { - fn from(x: NumberOrHex) -> Self { - BlockNumber(x) - } -} - -impl Default for NumberOrHex { - fn default() -> Self { - Self::Number(Default::default()) - } -} - -impl NumberOrHex { - /// Converts this number into an U256. 
- pub fn into_u256(self) -> U256 { - match self { - NumberOrHex::Number(n) => n.into(), - NumberOrHex::Hex(h) => h, - } - } -} - -impl From for NumberOrHex { - fn from(n: u32) -> Self { - NumberOrHex::Number(n.into()) - } -} - -impl From for NumberOrHex { - fn from(n: u64) -> Self { - NumberOrHex::Number(n) - } -} - -impl From for NumberOrHex { - fn from(n: u128) -> Self { - NumberOrHex::Hex(n.into()) - } -} - -impl From for NumberOrHex { - fn from(n: U256) -> Self { - NumberOrHex::Hex(n) - } -} - -/// An error type that signals an out-of-range conversion attempt. -#[derive(Debug, thiserror::Error)] -#[error("Out-of-range conversion attempt")] -pub struct TryFromIntError; - -impl TryFrom for u32 { - type Error = TryFromIntError; - fn try_from(num_or_hex: NumberOrHex) -> Result { - num_or_hex - .into_u256() - .try_into() - .map_err(|_| TryFromIntError) - } -} - -impl TryFrom for u64 { - type Error = TryFromIntError; - fn try_from(num_or_hex: NumberOrHex) -> Result { - num_or_hex - .into_u256() - .try_into() - .map_err(|_| TryFromIntError) - } -} - -impl TryFrom for u128 { - type Error = TryFromIntError; - fn try_from(num_or_hex: NumberOrHex) -> Result { - num_or_hex - .into_u256() - .try_into() - .map_err(|_| TryFromIntError) - } -} - -impl From for U256 { - fn from(num_or_hex: NumberOrHex) -> U256 { - num_or_hex.into_u256() - } -} - -// All unsigned ints can be converted into a BlockNumber: -macro_rules! into_block_number { - ($($t: ty)+) => { - $( - impl From<$t> for BlockNumber { - fn from(x: $t) -> Self { - NumberOrHex::Number(x.into()).into() - } - } - )+ - } -} -into_block_number!(u8 u16 u32 u64); - -/// Arbitrary properties defined in the chain spec as a JSON object. -pub type SystemProperties = serde_json::Map; - -/// Possible transaction status events. -/// -/// # Note -/// -/// This is copied from `sp-transaction-pool` to avoid a dependency on that crate. Therefore it -/// must be kept compatible with that type from the target substrate version. 
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub enum SubstrateTxStatus { - /// Transaction is part of the future queue. - Future, - /// Transaction is part of the ready queue. - Ready, - /// The transaction has been broadcast to the given peers. - Broadcast(Vec), - /// Transaction has been included in block with given hash. - InBlock(BlockHash), - /// The block this transaction was included in has been retracted. - Retracted(BlockHash), - /// Maximum number of finality watchers has been reached, - /// old watchers are being removed. - FinalityTimeout(BlockHash), - /// Transaction has been finalized by a finality-gadget, e.g GRANDPA - Finalized(BlockHash), - /// Transaction has been replaced in the pool, by another transaction - /// that provides the same tags. (e.g. same (sender, nonce)). - Usurped(Hash), - /// Transaction has been dropped from the pool because of the limit. - Dropped, - /// Transaction is no longer valid in the current state. - Invalid, -} - -/// This contains the runtime version information necessary to make transactions, as obtained from -/// the RPC call `state_getRuntimeVersion`, -#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct RuntimeVersion { - /// Version of the runtime specification. A full-node will not attempt to use its native - /// runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`, - /// `spec_version` and `authoring_version` are the same between Wasm and native. - pub spec_version: u32, - - /// All existing dispatches are fully compatible when this number doesn't change. If this - /// number changes, then `spec_version` must change, also. 
- /// - /// This number must change when an existing dispatchable (module ID, dispatch ID) is changed, - /// either through an alteration in its user-level semantics, a parameter - /// added/removed/changed, a dispatchable being removed, a module being removed, or a - /// dispatchable/module changing its index. - /// - /// It need *not* change when a new module is added or when a dispatchable is added. - pub transaction_version: u32, - - /// The other fields present may vary and aren't necessary for `subxt`; they are preserved in - /// this map. - #[serde(flatten)] - pub other: HashMap, -} - -/// ReadProof struct returned by the RPC -/// -/// # Note -/// -/// This is copied from `sc-rpc-api` to avoid a dependency on that crate. Therefore it -/// must be kept compatible with that type from the target substrate version. -#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct ReadProof { - /// Block hash used to generate the proof - pub at: Hash, - /// A proof used to prove that storage entries are included in the storage trie - pub proof: Vec, -} - -/// Statistics of a block returned by the `dev_getBlockStats` RPC. -#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct BlockStats { - /// The length in bytes of the storage proof produced by executing the block. - pub witness_len: u64, - /// The length in bytes of the storage proof after compaction. - pub witness_compact_len: u64, - /// Length of the block in bytes. - /// - /// This information can also be acquired by downloading the whole block. This merely - /// saves some complexity on the client side. - pub block_len: u64, - /// Number of extrinsics in the block. - /// - /// This information can also be acquired by downloading the whole block. This merely - /// saves some complexity on the client side. - pub num_extrinsics: u64, -} - -/// Storage key. 
-#[derive( - Serialize, Deserialize, Hash, PartialOrd, Ord, PartialEq, Eq, Clone, Encode, Decode, Debug, -)] -pub struct StorageKey(#[serde(with = "impl_serde::serialize")] pub Vec); -impl AsRef<[u8]> for StorageKey { - fn as_ref(&self) -> &[u8] { - &self.0 - } -} - -/// Storage data. -#[derive( - Serialize, Deserialize, Hash, PartialOrd, Ord, PartialEq, Eq, Clone, Encode, Decode, Debug, -)] -pub struct StorageData(#[serde(with = "impl_serde::serialize")] pub Vec); -impl AsRef<[u8]> for StorageData { - fn as_ref(&self) -> &[u8] { - &self.0 - } -} - -/// Storage change set -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] -#[serde(rename_all = "camelCase")] -pub struct StorageChangeSet { - /// Block hash - pub block: Hash, - /// A list of changes - pub changes: Vec<(StorageKey, Option)>, -} - -/// Health struct returned by the RPC -#[derive(Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Health { - /// Number of connected peers - pub peers: usize, - /// Is the node syncing - pub is_syncing: bool, - /// Should this node have any peers - /// - /// Might be false for local chains or when running without discovery. - pub should_have_peers: bool, -} - -/// The operation could not be processed due to an error. -#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct ErrorEvent { - /// Reason of the error. - pub error: String, -} - -/// The runtime specification of the current block. -/// -/// This event is generated for: -/// - the first announced block by the follow subscription -/// - blocks that suffered a change in runtime compared with their parents -#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct RuntimeVersionEvent { - /// The runtime version. - pub spec: RuntimeVersion, -} - -/// The runtime event generated if the `follow` subscription -/// has set the `with_runtime` flag. 
-#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(tag = "type")] -pub enum RuntimeEvent { - /// The runtime version of this block. - Valid(RuntimeVersionEvent), - /// The runtime could not be obtained due to an error. - Invalid(ErrorEvent), -} - -/// Contain information about the latest finalized block. -/// -/// # Note -/// -/// This is the first event generated by the `follow` subscription -/// and is submitted only once. -#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Initialized { - /// The hash of the latest finalized block. - pub finalized_block_hash: Hash, - /// The runtime version of the finalized block. - /// - /// # Note - /// - /// This is present only if the `with_runtime` flag is set for - /// the `follow` subscription. - pub finalized_block_runtime: Option, -} - -/// Indicate a new non-finalized block. -#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct NewBlock { - /// The hash of the new block. - pub block_hash: Hash, - /// The parent hash of the new block. - pub parent_block_hash: Hash, - /// The runtime version of the new block. - /// - /// # Note - /// - /// This is present only if the `with_runtime` flag is set for - /// the `follow` subscription. - pub new_runtime: Option, -} - -/// Indicate the block hash of the new best block. -#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct BestBlockChanged { - /// The block hash of the new best block. - pub best_block_hash: Hash, -} - -/// Indicate the finalized and pruned block hashes. -#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct Finalized { - /// Block hashes that are finalized. - pub finalized_block_hashes: Vec, - /// Block hashes that are pruned (removed). - pub pruned_block_hashes: Vec, -} - -/// Indicate the operation id of the event. 
-#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct OperationId { - /// The operation id of the event. - pub operation_id: String, -} - -/// The response of the `chainHead_body` method. -#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct OperationBodyDone { - /// The operation id of the event. - pub operation_id: String, - /// Array of hexadecimal-encoded scale-encoded extrinsics found in the block. - pub value: Vec, -} - -/// The response of the `chainHead_call` method. -#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct OperationCallDone { - /// The operation id of the event. - pub operation_id: String, - /// Hexadecimal-encoded output of the runtime function call. - pub output: String, -} - -/// The response of the `chainHead_call` method. -#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct OperationStorageItems { - /// The operation id of the event. - pub operation_id: String, - /// The resulting items. - pub items: Vec, -} - -/// Indicate a problem during the operation. -#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct OperationError { - /// The operation id of the event. - pub operation_id: String, - /// The reason of the error. - pub error: String, -} - -/// The event generated by the `follow` method. -/// -/// The block events are generated in the following order: -/// 1. Initialized - generated only once to signal the latest finalized block -/// 2. NewBlock - a new block was added. -/// 3. BestBlockChanged - indicate that the best block is now the one from this event. The block was -/// announced priorly with the `NewBlock` event. -/// 4. Finalized - State the finalized and pruned blocks. 
-/// -/// The following events are related to operations: -/// - OperationBodyDone: The response of the `chainHead_body` -/// - OperationCallDone: The response of the `chainHead_call` -/// - OperationStorageItems: Items produced by the `chianHead_storage` -/// - OperationWaitingForContinue: Generated after OperationStorageItems and requires the user to -/// call `chainHead_continue` -/// - OperationStorageDone: The `chainHead_storage` method has produced all the results -/// - OperationInaccessible: The server was unable to provide the result, retries might succeed in -/// the future -/// - OperationError: The server encountered an error, retries will not succeed -/// -/// The stop event indicates that the JSON-RPC server was unable to provide a consistent list of -/// the blocks at the head of the chain. -#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(tag = "event")] -pub enum FollowEvent { - /// The latest finalized block. - /// - /// This event is generated only once. - Initialized(Initialized), - /// A new non-finalized block was added. - NewBlock(NewBlock), - /// The best block of the chain. - BestBlockChanged(BestBlockChanged), - /// A list of finalized and pruned blocks. - Finalized(Finalized), - /// The response of the `chainHead_body` method. - OperationBodyDone(OperationBodyDone), - /// The response of the `chainHead_call` method. - OperationCallDone(OperationCallDone), - /// Yield one or more items found in the storage. - OperationStorageItems(OperationStorageItems), - /// Ask the user to call `chainHead_continue` to produce more events - /// regarding the operation id. - OperationWaitingForContinue(OperationId), - /// The responses of the `chainHead_storage` method have been produced. - OperationStorageDone(OperationId), - /// The RPC server was unable to provide the response of the following operation id. - /// - /// Repeating the same operation in the future might succeed. 
- OperationInaccessible(OperationId), - /// The RPC server encountered an error while processing an operation id. - /// - /// Repeating the same operation in the future will not succeed. - OperationError(OperationError), - /// The subscription is dropped and no further events - /// will be generated. - Stop, -} - -/// The storage item received as parameter. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct StorageQuery { - /// The provided key. - pub key: Key, - /// The type of the storage query. - #[serde(rename = "type")] - pub query_type: StorageQueryType, -} - -/// The type of the storage query. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub enum StorageQueryType { - /// Fetch the value of the provided key. - Value, - /// Fetch the hash of the value of the provided key. - Hash, - /// Fetch the closest descendant merkle value. - ClosestDescendantMerkleValue, - /// Fetch the values of all descendants of they provided key. - DescendantsValues, - /// Fetch the hashes of the values of all descendants of they provided key. - DescendantsHashes, -} - -/// The storage result. -#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct StorageResult { - /// The hex-encoded key of the result. - pub key: String, - /// The result of the query. - #[serde(flatten)] - pub result: StorageResultType, -} - -/// The type of the storage query. -#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] -#[serde(rename_all = "camelCase")] -pub enum StorageResultType { - /// Fetch the value of the provided key. - Value(String), - /// Fetch the hash of the value of the provided key. - Hash(String), - /// Fetch the closest descendant merkle value. - ClosestDescendantMerkleValue(String), -} - -/// The method respose of `chainHead_body`, `chainHead_call` and `chainHead_storage`. 
-#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(tag = "result")] -pub enum MethodResponse { - /// The method has started. - Started(MethodResponseStarted), - /// The RPC server cannot handle the request at the moment. - LimitReached, -} - -/// The `started` result of a method. -#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct MethodResponseStarted { - /// The operation id of the response. - pub operation_id: String, - /// The number of items from the back of the `chainHead_storage` that have been discarded. - pub discarded_items: Option, -} - -/// The transaction was broadcasted to a number of peers. -/// -/// # Note -/// -/// The RPC does not guarantee that the peers have received the -/// transaction. -/// -/// When the number of peers is zero, the event guarantees that -/// shutting down the local node will lead to the transaction -/// not being included in the chain. -#[derive(Debug, Clone, PartialEq, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct TransactionBroadcasted { - /// The number of peers the transaction was broadcasted to. - #[serde(with = "as_string")] - pub num_peers: usize, -} - -/// The transaction was included in a block of the chain. -#[derive(Debug, Clone, PartialEq, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct TransactionBlock { - /// The hash of the block the transaction was included into. - pub hash: Hash, - /// The index (zero-based) of the transaction within the body of the block. - #[serde(with = "as_string")] - pub index: usize, -} - -/// The transaction could not be processed due to an error. -#[derive(Debug, Clone, PartialEq, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct TransactionError { - /// Reason of the error. - pub error: String, -} - -/// The transaction was dropped because of exceeding limits. 
-#[derive(Debug, Clone, PartialEq, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct TransactionDropped { - /// True if the transaction was broadcasted to other peers and - /// may still be included in the block. - pub broadcasted: bool, - /// Reason of the event. - pub error: String, -} - -/// Possible transaction status events. -/// -/// The status events can be grouped based on their kinds as: -/// -/// 1. Runtime validated the transaction: -/// - `Validated` -/// -/// 2. Inside the `Ready` queue: -/// - `Broadcast` -/// -/// 3. Leaving the pool: -/// - `BestChainBlockIncluded` -/// - `Invalid` -/// -/// 4. Block finalized: -/// - `Finalized` -/// -/// 5. At any time: -/// - `Dropped` -/// - `Error` -/// -/// The subscription's stream is considered finished whenever the following events are -/// received: `Finalized`, `Error`, `Invalid` or `Dropped`. However, the user is allowed -/// to unsubscribe at any moment. -#[derive(Debug, Clone, PartialEq, Deserialize)] -// We need to manually specify the trait bounds for the `Hash` trait to ensure `into` and -// `from` still work. -#[serde(bound(deserialize = "Hash: Deserialize<'de> + Clone"))] -#[serde(from = "TransactionEventIR")] -pub enum TransactionEvent { - /// The transaction was validated by the runtime. - Validated, - /// The transaction was broadcasted to a number of peers. - Broadcasted(TransactionBroadcasted), - /// The transaction was included in a best block of the chain. - /// - /// # Note - /// - /// This may contain `None` if the block is no longer a best - /// block of the chain. - BestChainBlockIncluded(Option>), - /// The transaction was included in a finalized block. - Finalized(TransactionBlock), - /// The transaction could not be processed due to an error. - Error(TransactionError), - /// The transaction is marked as invalid. - Invalid(TransactionError), - /// The client was not capable of keeping track of this transaction. 
- Dropped(TransactionDropped), -} - -/// Intermediate representation (IR) for the transaction events -/// that handles block events only. -/// -/// The block events require a JSON compatible interpretation similar to: -/// -/// ```json -/// { event: "EVENT", block: { hash: "0xFF", index: 0 } } -/// ``` -/// -/// This IR is introduced to circumvent that the block events need to -/// be serialized/deserialized with "tag" and "content", while other -/// events only require "tag". -#[derive(Debug, Clone, PartialEq, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(tag = "event", content = "block")] -enum TransactionEventBlockIR { - /// The transaction was included in the best block of the chain. - BestChainBlockIncluded(Option>), - /// The transaction was included in a finalized block of the chain. - Finalized(TransactionBlock), -} - -/// Intermediate representation (IR) for the transaction events -/// that handles non-block events only. -/// -/// The non-block events require a JSON compatible interpretation similar to: -/// -/// ```json -/// { event: "EVENT", num_peers: 0 } -/// ``` -/// -/// This IR is introduced to circumvent that the block events need to -/// be serialized/deserialized with "tag" and "content", while other -/// events only require "tag". -#[derive(Debug, Clone, PartialEq, Deserialize)] -#[serde(rename_all = "camelCase")] -#[serde(tag = "event")] -enum TransactionEventNonBlockIR { - Validated, - Broadcasted(TransactionBroadcasted), - Error(TransactionError), - Invalid(TransactionError), - Dropped(TransactionDropped), -} - -/// Intermediate representation (IR) used for serialization/deserialization of the -/// [`TransactionEvent`] in a JSON compatible format. -/// -/// Serde cannot mix `#[serde(tag = "event")]` with `#[serde(tag = "event", content = "block")]` -/// for specific enum variants. 
Therefore, this IR is introduced to circumvent this -/// restriction, while exposing a simplified [`TransactionEvent`] for users of the -/// rust ecosystem. -#[derive(Debug, Clone, PartialEq, Deserialize)] -#[serde(bound(deserialize = "Hash: Deserialize<'de>"))] -#[serde(rename_all = "camelCase")] -#[serde(untagged)] -enum TransactionEventIR { - Block(TransactionEventBlockIR), - NonBlock(TransactionEventNonBlockIR), -} - -impl From> for TransactionEventIR { - fn from(value: TransactionEvent) -> Self { - match value { - TransactionEvent::Validated => { - TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Validated) - } - TransactionEvent::Broadcasted(event) => { - TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Broadcasted(event)) - } - TransactionEvent::BestChainBlockIncluded(event) => { - TransactionEventIR::Block(TransactionEventBlockIR::BestChainBlockIncluded(event)) - } - TransactionEvent::Finalized(event) => { - TransactionEventIR::Block(TransactionEventBlockIR::Finalized(event)) - } - TransactionEvent::Error(event) => { - TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Error(event)) - } - TransactionEvent::Invalid(event) => { - TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Invalid(event)) - } - TransactionEvent::Dropped(event) => { - TransactionEventIR::NonBlock(TransactionEventNonBlockIR::Dropped(event)) - } - } - } -} - -impl From> for TransactionEvent { - fn from(value: TransactionEventIR) -> Self { - match value { - TransactionEventIR::NonBlock(status) => match status { - TransactionEventNonBlockIR::Validated => TransactionEvent::Validated, - TransactionEventNonBlockIR::Broadcasted(event) => { - TransactionEvent::Broadcasted(event) - } - TransactionEventNonBlockIR::Error(event) => TransactionEvent::Error(event), - TransactionEventNonBlockIR::Invalid(event) => TransactionEvent::Invalid(event), - TransactionEventNonBlockIR::Dropped(event) => TransactionEvent::Dropped(event), - }, - TransactionEventIR::Block(block) => match 
block { - TransactionEventBlockIR::Finalized(event) => TransactionEvent::Finalized(event), - TransactionEventBlockIR::BestChainBlockIncluded(event) => { - TransactionEvent::BestChainBlockIncluded(event) - } - }, - } - } -} - -/// Serialize and deserialize helper as string. -mod as_string { - use super::*; - use serde::Deserializer; - - pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result { - String::deserialize(deserializer)? - .parse() - .map_err(|e| serde::de::Error::custom(format!("Parsing failed: {e}"))) - } -} - -#[cfg(test)] -mod test { - use super::*; - - /// A util function to assert the result of serialization and deserialization is the same. - pub fn assert_deser(s: &str, expected: T) - where - T: std::fmt::Debug + serde::ser::Serialize + serde::de::DeserializeOwned + PartialEq, - { - assert_eq!(serde_json::from_str::(s).unwrap(), expected); - assert_eq!(serde_json::to_string(&expected).unwrap(), s); - } - - // Check that some A can be serialized and then deserialized into some B. - pub fn assert_ser_deser(a: &A, b: &B) - where - A: serde::Serialize, - B: serde::de::DeserializeOwned + PartialEq + std::fmt::Debug, - { - let json = serde_json::to_string(a).expect("serializing failed"); - let new_b: B = serde_json::from_str(&json).expect("deserializing failed"); - - assert_eq!(b, &new_b); - } - - #[test] - fn runtime_version_is_substrate_compatible() { - use sp_version::RuntimeVersion as SpRuntimeVersion; - - let substrate_runtime_version = SpRuntimeVersion { - spec_version: 123, - transaction_version: 456, - ..Default::default() - }; - - let json = serde_json::to_string(&substrate_runtime_version).expect("serializing failed"); - let val: RuntimeVersion = serde_json::from_str(&json).expect("deserializing failed"); - - // We ignore any other properties. 
- assert_eq!(val.spec_version, 123); - assert_eq!(val.transaction_version, 456); - } - - #[test] - fn runtime_version_handles_arbitrary_params() { - let val: RuntimeVersion = serde_json::from_str( - r#"{ - "specVersion": 123, - "transactionVersion": 456, - "foo": true, - "wibble": [1,2,3] - }"#, - ) - .expect("deserializing failed"); - - let mut m = std::collections::HashMap::new(); - m.insert("foo".to_owned(), serde_json::json!(true)); - m.insert("wibble".to_owned(), serde_json::json!([1, 2, 3])); - - assert_eq!( - val, - RuntimeVersion { - spec_version: 123, - transaction_version: 456, - other: m - } - ); - } - - #[test] - fn number_or_hex_deserializes_from_either_repr() { - assert_deser(r#""0x1234""#, NumberOrHex::Hex(0x1234.into())); - assert_deser(r#""0x0""#, NumberOrHex::Hex(0.into())); - assert_deser(r#"5"#, NumberOrHex::Number(5)); - assert_deser(r#"10000"#, NumberOrHex::Number(10000)); - assert_deser(r#"0"#, NumberOrHex::Number(0)); - assert_deser(r#"1000000000000"#, NumberOrHex::Number(1000000000000)); - } - - #[test] - fn justification_is_substrate_compatible() { - use sp_runtime::Justification as SpJustification; - - // As much as anything, this just checks that the Justification type - // is still a tuple as given. 
- assert_ser_deser::( - &([1, 2, 3, 4], vec![5, 6, 7, 8]), - &([1, 2, 3, 4], vec![5, 6, 7, 8]), - ); - } - - #[test] - fn storage_types_are_substrate_compatible() { - use sp_core::storage::{ - StorageChangeSet as SpStorageChangeSet, StorageData as SpStorageData, - StorageKey as SpStorageKey, - }; - - assert_ser_deser( - &SpStorageKey(vec![1, 2, 3, 4, 5]), - &StorageKey(vec![1, 2, 3, 4, 5]), - ); - assert_ser_deser( - &SpStorageData(vec![1, 2, 3, 4, 5]), - &StorageData(vec![1, 2, 3, 4, 5]), - ); - assert_ser_deser( - &SpStorageChangeSet { - block: 1u64, - changes: vec![(SpStorageKey(vec![1]), Some(SpStorageData(vec![2])))], - }, - &StorageChangeSet { - block: 1u64, - changes: vec![(StorageKey(vec![1]), Some(StorageData(vec![2])))], - }, - ); - } -} diff --git a/subxt/src/runtime_api/runtime_client.rs b/subxt/src/runtime_api/runtime_client.rs index a785a4259f..f75aa2d862 100644 --- a/subxt/src/runtime_api/runtime_client.rs +++ b/subxt/src/runtime_api/runtime_client.rs @@ -4,7 +4,7 @@ use super::runtime_types::RuntimeApi; -use crate::{client::OnlineClientT, error::Error, Config}; +use crate::{backend::BlockRef, client::OnlineClientT, error::Error, Config}; use derivative::Derivative; use std::{future::Future, marker::PhantomData}; @@ -32,8 +32,8 @@ where Client: OnlineClientT, { /// Obtain a runtime API interface at some block hash. - pub fn at(&self, block_hash: T::Hash) -> RuntimeApi { - RuntimeApi::new(self.client.clone(), block_hash) + pub fn at(&self, block_ref: impl Into>) -> RuntimeApi { + RuntimeApi::new(self.client.clone(), block_ref.into()) } /// Obtain a runtime API interface at the latest block hash. @@ -44,14 +44,10 @@ where // return a Future that's Send + 'static, rather than tied to &self. let client = self.client.clone(); async move { - // get the hash for the latest block and use that. - let block_hash = client - .rpc() - .block_hash(None) - .await? - .expect("didn't pass a block number; qed"); + // get the ref for the latest block and use that. 
+ let block_ref = client.backend().latest_best_block_ref().await?; - Ok(RuntimeApi::new(client, block_hash)) + Ok(RuntimeApi::new(client, block_ref)) } } } diff --git a/subxt/src/runtime_api/runtime_types.rs b/subxt/src/runtime_api/runtime_types.rs index 3df2d2957d..3ea8c93947 100644 --- a/subxt/src/runtime_api/runtime_types.rs +++ b/subxt/src/runtime_api/runtime_types.rs @@ -3,6 +3,7 @@ // see LICENSE for license details. use crate::{ + backend::{BackendExt, BlockRef}, client::OnlineClientT, error::{Error, MetadataError}, metadata::DecodeWithMetadata, @@ -19,16 +20,16 @@ use super::RuntimeApiPayload; #[derivative(Clone(bound = "Client: Clone"))] pub struct RuntimeApi { client: Client, - block_hash: T::Hash, + block_ref: BlockRef, _marker: PhantomData, } impl RuntimeApi { /// Create a new [`RuntimeApi`] - pub(crate) fn new(client: Client, block_hash: T::Hash) -> Self { + pub(crate) fn new(client: Client, block_ref: BlockRef) -> Self { Self { client, - block_hash, + block_ref, _marker: PhantomData, } } @@ -46,13 +47,13 @@ where call_parameters: Option<&'a [u8]>, ) -> impl Future> + 'a { let client = self.client.clone(); - let block_hash = self.block_hash; + let block_hash = self.block_ref.hash(); // Ensure that the returned future doesn't have a lifetime tied to api.runtime_api(), // which is a temporary thing we'll be throwing away quickly: async move { let data: Res = client - .rpc() - .state_call(function, call_parameters, Some(block_hash)) + .backend() + .call_decoding(function, call_parameters, block_hash) .await?; Ok(data) } @@ -64,7 +65,7 @@ where payload: Call, ) -> impl Future> { let client = self.client.clone(); - let block_hash = self.block_hash; + let block_hash = self.block_ref.hash(); // Ensure that the returned future doesn't have a lifetime tied to api.runtime_api(), // which is a temporary thing we'll be throwing away quickly: async move { @@ -94,8 +95,8 @@ where let call_name = format!("{}_{}", payload.trait_name(), payload.method_name()); let 
bytes = client - .rpc() - .state_call_raw(&call_name, Some(params.as_slice()), Some(block_hash)) + .backend() + .call(&call_name, Some(params.as_slice()), block_hash) .await?; let value = ::decode_with_metadata( diff --git a/subxt/src/storage/mod.rs b/subxt/src/storage/mod.rs index 5df31e8f64..0219cd8caf 100644 --- a/subxt/src/storage/mod.rs +++ b/subxt/src/storage/mod.rs @@ -12,10 +12,7 @@ pub mod utils; pub use storage_client::StorageClient; -pub use storage_type::{KeyIter, Storage}; - -// Re-export as this is used in the public API in this module: -pub use crate::rpc::types::StorageKey; +pub use storage_type::Storage; /// Types representing an address which describes where a storage /// entry lives and how to properly decode it. diff --git a/subxt/src/storage/storage_client.rs b/subxt/src/storage/storage_client.rs index 2055c80ace..4fb980e596 100644 --- a/subxt/src/storage/storage_client.rs +++ b/subxt/src/storage/storage_client.rs @@ -6,8 +6,8 @@ use super::{ storage_type::{validate_storage_address, Storage}, utils, StorageAddress, }; - use crate::{ + backend::BlockRef, client::{OfflineClientT, OnlineClientT}, error::Error, Config, @@ -73,8 +73,8 @@ where Client: OnlineClientT, { /// Obtain storage at some block hash. - pub fn at(&self, block_hash: T::Hash) -> Storage { - Storage::new(self.client.clone(), block_hash) + pub fn at(&self, block_ref: impl Into>) -> Storage { + Storage::new(self.client.clone(), block_ref.into()) } /// Obtain storage at the latest block hash. @@ -85,14 +85,10 @@ where // return a Future that's Send + 'static, rather than tied to &self. let client = self.client.clone(); async move { - // get the hash for the latest block and use that. - let block_hash = client - .rpc() - .block_hash(None) - .await? - .expect("didn't pass a block number; qed"); + // get the ref for the latest block and use that. 
+ let block_ref = client.backend().latest_best_block_ref().await?; - Ok(Storage::new(client, block_hash)) + Ok(Storage::new(client, block_ref)) } } } diff --git a/subxt/src/storage/storage_type.rs b/subxt/src/storage/storage_type.rs index b4facee0d5..aa57c361cb 100644 --- a/subxt/src/storage/storage_type.rs +++ b/subxt/src/storage/storage_type.rs @@ -5,32 +5,36 @@ use super::storage_address::{StorageAddress, Yes}; use crate::{ + backend::{BackendExt, BlockRef}, client::OnlineClientT, error::{Error, MetadataError}, metadata::{DecodeWithMetadata, Metadata}, - rpc::types::{StorageData, StorageKey}, Config, }; use codec::Decode; use derivative::Derivative; +use futures::StreamExt; use std::{future::Future, marker::PhantomData}; use subxt_metadata::{PalletMetadata, StorageEntryMetadata, StorageEntryType}; +/// This is returned from a couple of storage functions. +pub use crate::backend::StreamOfResults; + /// Query the runtime storage. #[derive(Derivative)] #[derivative(Clone(bound = "Client: Clone"))] pub struct Storage { client: Client, - block_hash: T::Hash, + block_ref: BlockRef, _marker: PhantomData, } impl Storage { /// Create a new [`Storage`] - pub(crate) fn new(client: Client, block_hash: T::Hash) -> Self { + pub(crate) fn new(client: Client, block_ref: BlockRef) -> Self { Self { client, - block_hash, + block_ref, _marker: PhantomData, } } @@ -41,18 +45,40 @@ where T: Config, Client: OnlineClientT, { - /// Fetch the raw encoded value at the address/key given. - pub fn fetch_raw<'address>( + /// Fetch the raw encoded value at the key given. 
+ pub fn fetch_raw( &self, - key: &'address [u8], - ) -> impl Future>, Error>> + 'address { + key: impl Into>, + ) -> impl Future>, Error>> + 'static { let client = self.client.clone(); - let block_hash = self.block_hash; - // Ensure that the returned future doesn't have a lifetime tied to api.storage(), - // which is a temporary thing we'll be throwing away quickly: + let key = key.into(); + // Keep this alive until the call is complete: + let block_ref = self.block_ref.clone(); + // Manual future so lifetime not tied to api.storage(). async move { - let data = client.rpc().storage(key, Some(block_hash)).await?; - Ok(data.map(|d| d.0)) + let data = client + .backend() + .storage_fetch_value(key, block_ref.hash()) + .await?; + Ok(data) + } + } + + /// Stream all of the raw keys underneath the key given + pub fn fetch_raw_keys( + &self, + key: impl Into>, + ) -> impl Future>, Error>> + 'static { + let client = self.client.clone(); + let block_hash = self.block_ref.hash(); + let key = key.into(); + // Manual future so lifetime not tied to api.storage(). + async move { + let keys = client + .backend() + .storage_fetch_descendant_keys(key, None, block_hash) + .await?; + Ok(keys) } } @@ -107,7 +133,7 @@ where // Look up the return type ID to enable DecodeWithMetadata: let lookup_bytes = super::utils::storage_address_bytes(address, &metadata)?; - if let Some(data) = client.fetch_raw(&lookup_bytes).await? { + if let Some(data) = client.fetch_raw(lookup_bytes).await? { let val = decode_storage_with_metadata::(&mut &*data, &metadata, entry)?; Ok(Some(val)) @@ -146,26 +172,6 @@ where } } - /// Fetch up to `count` keys for a storage map in lexicographic order. - /// - /// Supports pagination by passing a value to `start_key`. 
- pub fn fetch_keys<'address>( - &self, - key: &'address [u8], - count: u32, - start_key: Option<&'address [u8]>, - ) -> impl Future, Error>> + 'address { - let client = self.client.clone(); - let block_hash = self.block_hash; - async move { - let keys = client - .rpc() - .storage_keys_paged(key, count, start_key, Some(block_hash)) - .await?; - Ok(keys) - } - } - /// Returns an iterator of key value pairs. /// /// ```no_run @@ -187,11 +193,11 @@ where /// .at_latest() /// .await /// .unwrap() - /// .iter(address, 10) + /// .iter(address) /// .await /// .unwrap(); /// - /// while let Some((key, value)) = iter.next().await.unwrap() { + /// while let Some(Ok((key, value))) = iter.next().await { /// println!("Key: 0x{}", hex::encode(&key)); /// println!("Value: {}", value); /// } @@ -200,15 +206,14 @@ where pub fn iter
( &self, address: Address, - page_size: u32, - ) -> impl Future, Error>> + 'static + ) -> impl Future, Address::Target)>, Error>> + 'static where Address: StorageAddress + 'static, { - let client = self.clone(); - let block_hash = self.block_hash; + let client = self.client.clone(); + let block_ref = self.block_ref.clone(); async move { - let metadata = client.client.metadata(); + let metadata = client.metadata(); let (pallet, entry) = lookup_entry_details(address.pallet_name(), address.entry_name(), &metadata)?; @@ -226,17 +231,25 @@ where // The root pallet/entry bytes for this storage entry: let address_root_bytes = super::utils::storage_address_root_bytes(&address); - Ok(KeyIter { - client, - address_root_bytes, - metadata, - return_type_id, - block_hash, - count: page_size, - start_key: None, - buffer: Default::default(), - _marker: std::marker::PhantomData, - }) + let s = client + .backend() + .storage_fetch_descendant_values(address_root_bytes, block_ref.hash()) + .await? + .map(move |kv| { + let kv = match kv { + Ok(kv) => kv, + Err(e) => return Err(e), + }; + let val = Address::Target::decode_with_metadata( + &mut &*kv.value, + return_type_id, + &metadata, + )?; + Ok((kv.key, val)) + }); + + let s = StreamOfResults::new(Box::pin(s)); + Ok(s) } } @@ -258,7 +271,7 @@ where )); // fetch the raw bytes and decode them into the StorageVersion struct: - let storage_version_bytes = self.fetch_raw(&key_bytes).await?.ok_or_else(|| { + let storage_version_bytes = self.fetch_raw(key_bytes).await?.ok_or_else(|| { format!( "Unexpected: entry for storage version in pallet \"{}\" not found", pallet_name.as_ref() @@ -267,7 +280,7 @@ where u16::decode(&mut &storage_version_bytes[..]).map_err(Into::into) } - /// Fetches the Wasm code of the runtime. + /// Fetch the runtime WASM code. 
pub async fn runtime_wasm_code(&self) -> Result, Error> { // note: this should match the `CODE` constant in `sp_core::storage::well_known_keys` const CODE: &str = ":code"; @@ -277,71 +290,6 @@ where } } -/// Iterates over key value pairs in a map. -pub struct KeyIter { - client: Storage, - address_root_bytes: Vec, - return_type_id: u32, - metadata: Metadata, - count: u32, - block_hash: T::Hash, - start_key: Option, - buffer: Vec<(StorageKey, StorageData)>, - _marker: std::marker::PhantomData, -} - -impl<'a, T, Client, ReturnTy> KeyIter -where - T: Config, - Client: OnlineClientT, - ReturnTy: DecodeWithMetadata, -{ - /// Returns the next key value pair from a map. - pub async fn next(&mut self) -> Result, Error> { - loop { - if let Some((k, v)) = self.buffer.pop() { - let val = ReturnTy::decode_with_metadata( - &mut &v.0[..], - self.return_type_id, - &self.metadata, - )?; - return Ok(Some((k, val))); - } else { - let start_key = self.start_key.take(); - let keys = self - .client - .fetch_keys( - &self.address_root_bytes, - self.count, - start_key.as_ref().map(|k| &*k.0), - ) - .await?; - - if keys.is_empty() { - return Ok(None); - } - - self.start_key = keys.last().cloned(); - - let change_sets = self - .client - .client - .rpc() - .query_storage_at(keys.iter().map(|k| &*k.0), Some(self.block_hash)) - .await?; - for change_set in change_sets { - for (k, v) in change_set.changes { - if let Some(v) = v { - self.buffer.push((k, v)); - } - } - } - debug_assert_eq!(self.buffer.len(), keys.len()); - } - } - } -} - /// Validate a storage address against the metadata. 
pub(crate) fn validate_storage_address( address: &Address, diff --git a/subxt/src/tx/mod.rs b/subxt/src/tx/mod.rs index 63b0130658..039db376bf 100644 --- a/subxt/src/tx/mod.rs +++ b/subxt/src/tx/mod.rs @@ -21,7 +21,10 @@ pub use self::signer::PairSigner; pub use self::{ signer::Signer, - tx_client::{PartialExtrinsic, SubmittableExtrinsic, TxClient}, + tx_client::{ + PartialExtrinsic, SubmittableExtrinsic, TransactionInvalid, TransactionUnknown, TxClient, + ValidationResult, + }, tx_payload::{dynamic, BoxedPayload, DynamicPayload, Payload, TxPayload}, tx_progress::{TxInBlock, TxProgress, TxStatus}, }; diff --git a/subxt/src/tx/tx_client.rs b/subxt/src/tx/tx_client.rs index f2a538eddd..74d7192e7a 100644 --- a/subxt/src/tx/tx_client.rs +++ b/subxt/src/tx/tx_client.rs @@ -4,21 +4,18 @@ use std::borrow::Cow; -use codec::{Compact, Decode, Encode}; -use derivative::Derivative; -use sp_core_hashing::blake2_256; - use crate::error::DecodeError; use crate::{ + backend::{BackendExt, BlockRef, TransactionStatus}, client::{OfflineClientT, OnlineClientT}, config::{Config, ExtrinsicParams, ExtrinsicParamsEncoder, Hasher}, error::{Error, MetadataError}, tx::{Signer as SignerT, TxPayload, TxProgress}, utils::{Encoded, PhantomDataSendSync}, }; - -// This is returned from an API below, so expose it here. -pub use crate::rpc::types::DryRunResult; +use codec::{Compact, Decode, Encode}; +use derivative::Derivative; +use sp_core_hashing::blake2_256; /// A client for working with transactions. #[derive(Derivative)] @@ -172,13 +169,14 @@ where { /// Get the account nonce for a given account ID. 
pub async fn account_nonce(&self, account_id: &T::AccountId) -> Result { + let block_ref = self.client.backend().latest_best_block_ref().await?; let account_nonce_bytes = self .client - .rpc() - .state_call_raw( + .backend() + .call( "AccountNonceApi_account_nonce", Some(&account_id.encode()), - None, + block_ref.hash(), ) .await?; @@ -429,6 +427,11 @@ where } } + /// Calculate and return the hash of the extrinsic, based on the configured hasher. + pub fn hash(&self) -> T::Hash { + T::Hasher::hash_of(&self.encoded) + } + /// Returns the SCALE encoded extrinsic bytes. pub fn encoded(&self) -> &[u8] { &self.encoded.0 @@ -452,32 +455,91 @@ where /// and obtain details about it, once it has made it into a block. pub async fn submit_and_watch(&self) -> Result, Error> { // Get a hash of the extrinsic (we'll need this later). - let ext_hash = T::Hasher::hash_of(&self.encoded); + let ext_hash = self.hash(); // Submit and watch for transaction progress. - let sub = self.client.rpc().watch_extrinsic(&self.encoded).await?; + let sub = self + .client + .backend() + .submit_transaction(&self.encoded.0) + .await?; Ok(TxProgress::new(sub, self.client.clone(), ext_hash)) } /// Submits the extrinsic to the chain for block inclusion. /// - /// Returns `Ok` with the extrinsic hash if it is valid extrinsic. - /// - /// # Note - /// - /// Success does not mean the extrinsic has been included in the block, just that it is valid - /// and has been included in the transaction pool. + /// It's usually better to call `submit_and_watch` to get an idea of the progress of the + /// submission and whether it's eventually successful or not. This call does not guarantee + /// success, and is just sending the transaction to the chain. 
pub async fn submit(&self) -> Result { - self.client.rpc().submit_extrinsic(&self.encoded).await + let ext_hash = self.hash(); + let mut sub = self + .client + .backend() + .submit_transaction(&self.encoded.0) + .await?; + + // If we get a bad status or error back straight away then error, else return the hash. + match sub.next().await { + Some(Ok(status)) => match status { + TransactionStatus::Validated + | TransactionStatus::Broadcasted { .. } + | TransactionStatus::InBestBlock { .. } + | TransactionStatus::InFinalizedBlock { .. } => Ok(ext_hash), + TransactionStatus::Error { message } => { + Err(Error::Other(format!("Transaction error: {message}"))) + } + TransactionStatus::Invalid { message } => { + Err(Error::Other(format!("Transaction invalid: {message}"))) + } + TransactionStatus::Dropped { message } => { + Err(Error::Other(format!("Transaction dropped: {message}"))) + } + }, + Some(Err(e)) => Err(e), + None => Err(Error::Other( + "Transaction broadcast was unsuccessful; stream terminated early".into(), + )), + } } - /// Submits the extrinsic to the dry_run RPC, to test if it would succeed. + /// Validate a transaction by submitting it to the relevant Runtime API. A transaction that is + /// valid can be added to a block, but may still end up in an error state. /// - /// Returns `Ok` with a [`DryRunResult`], which is the result of attempting to dry run the extrinsic. - pub async fn dry_run(&self, at: Option) -> Result { - let dry_run_bytes = self.client.rpc().dry_run(self.encoded(), at).await?; - dry_run_bytes.into_dry_run_result(&self.client.metadata()) + /// Returns `Ok` with a [`ValidationResult`], which is the result of attempting to dry run the extrinsic. + pub async fn validate(&self) -> Result { + let latest_block_ref = self.client.backend().latest_best_block_ref().await?; + self.validate_at(latest_block_ref).await + } + + /// Validate a transaction by submitting it to the relevant Runtime API. 
A transaction that is + /// valid can be added to a block, but may still end up in an error state. + /// + /// Returns `Ok` with a [`ValidationResult`], which is the result of attempting to dry run the extrinsic. + pub async fn validate_at( + &self, + at: impl Into>, + ) -> Result { + let block_hash = at.into().hash(); + + // Approach taken from https://github.com/paritytech/json-rpc-interface-spec/issues/55. + let mut params = Vec::with_capacity(8 + self.encoded.0.len() + 8); + 2u8.encode_to(&mut params); + params.extend(self.encoded().iter()); + block_hash.encode_to(&mut params); + + let res: Vec = self + .client + .backend() + .call( + "TaggedTransactionQueue_validate_transaction", + Some(¶ms), + block_hash, + ) + .await?; + + ValidationResult::try_from_bytes(res) } /// This returns an estimate for what the extrinsic is expected to cost to execute, less any tips. @@ -485,17 +547,281 @@ where pub async fn partial_fee_estimate(&self) -> Result { let mut params = self.encoded().to_vec(); (self.encoded().len() as u32).encode_to(&mut params); + let latest_block_ref = self.client.backend().latest_best_block_ref().await?; + // destructuring RuntimeDispatchInfo, see type information // data layout: {weight_ref_time: Compact, weight_proof_size: Compact, class: u8, partial_fee: u128} let (_, _, _, partial_fee) = self .client - .rpc() - .state_call::<(Compact, Compact, u8, u128)>( + .backend() + .call_decoding::<(Compact, Compact, u8, u128)>( "TransactionPaymentApi_query_info", Some(¶ms), - None, + latest_block_ref.hash(), ) .await?; Ok(partial_fee) } } + +impl ValidationResult { + #[allow(clippy::get_first)] + fn try_from_bytes(bytes: Vec) -> Result { + // TaggedTransactionQueue_validate_transaction returns this: + // https://github.com/paritytech/substrate/blob/0cdf7029017b70b7c83c21a4dc0aa1020e7914f6/primitives/runtime/src/transaction_validity.rs#L210 + // We copy some of the inner types and put the three states (valid, invalid, unknown) into one enum, + // because from 
our perspective, the call was successful regardless. + if bytes.get(0) == Some(&0) { + // ok: valid. Decode but, for now we discard most of the information + let res = TransactionValid::decode(&mut &bytes[1..])?; + Ok(ValidationResult::Valid(res)) + } else if bytes.get(0) == Some(&1) && bytes.get(1) == Some(&0) { + // error: invalid + let res = TransactionInvalid::decode(&mut &bytes[2..])?; + Ok(ValidationResult::Invalid(res)) + } else if bytes.get(0) == Some(&1) && bytes.get(1) == Some(&1) { + // error: unknown + let res = TransactionUnknown::decode(&mut &bytes[2..])?; + Ok(ValidationResult::Unknown(res)) + } else { + // unable to decode the bytes; they aren't what we expect. + Err(crate::Error::Unknown(bytes)) + } + } +} + +/// The result of performing [`SubmittableExtrinsic::validate()`]. +#[derive(Clone, Debug, PartialEq)] +pub enum ValidationResult { + /// The transaction is valid + Valid(TransactionValid), + /// The transaction is invalid + Invalid(TransactionInvalid), + /// Unable to validate the transaction + Unknown(TransactionUnknown), +} + +impl ValidationResult { + /// Is the transaction valid. + pub fn is_valid(&self) -> bool { + matches!(self, ValidationResult::Valid(_)) + } +} + +/// Transaction is valid; here is some more information about it. +#[derive(Decode, Clone, Debug, PartialEq)] +pub struct TransactionValid { + /// Priority of the transaction. + /// + /// Priority determines the ordering of two transactions that have all + /// their dependencies (required tags) satisfied. + pub priority: u64, + /// Transaction dependencies + /// + /// A non-empty list signifies that some other transactions which provide + /// given tags are required to be included before that one. + pub requires: Vec>, + /// Provided tags + /// + /// A list of tags this transaction provides. Successfully importing the transaction + /// will enable other transactions that depend on (require) those tags to be included as well. 
+ /// Provided and required tags allow Substrate to build a dependency graph of transactions + /// and import them in the right (linear) order. + pub provides: Vec>, + /// Transaction longevity + /// + /// Longevity describes minimum number of blocks the validity is correct. + /// After this period transaction should be removed from the pool or revalidated. + pub longevity: u64, + /// A flag indicating if the transaction should be propagated to other peers. + /// + /// By setting `false` here the transaction will still be considered for + /// including in blocks that are authored on the current node, but will + /// never be sent to other peers. + pub propagate: bool, +} + +/// The runtime was unable to validate the transaction. +#[derive(Decode, Clone, Debug, PartialEq)] +pub enum TransactionUnknown { + /// Could not lookup some information that is required to validate the transaction. + CannotLookup, + /// No validator found for the given unsigned transaction. + NoUnsignedValidator, + /// Any other custom unknown validity that is not covered by this enum. + Custom(u8), +} + +/// The transaction is invalid. +#[derive(Decode, Clone, Debug, PartialEq)] +pub enum TransactionInvalid { + /// The call of the transaction is not expected. + Call, + /// General error to do with the inability to pay some fees (e.g. account balance too low). + Payment, + /// General error to do with the transaction not yet being valid (e.g. nonce too high). + Future, + /// General error to do with the transaction being outdated (e.g. nonce too low). + Stale, + /// General error to do with the transaction's proofs (e.g. signature). + /// + /// # Possible causes + /// + /// When using a signed extension that provides additional data for signing, it is required + /// that the signing and the verifying side use the same additional data. Additional + /// data will only be used to generate the signature, but will not be part of the transaction + /// itself. 
As the verifying side does not know which additional data was used while signing + /// it will only be able to assume a bad signature and cannot express a more meaningful error. + BadProof, + /// The transaction birth block is ancient. + /// + /// # Possible causes + /// + /// For `FRAME`-based runtimes this would be caused by `current block number + /// - Era::birth block number > BlockHashCount`. (e.g. in Polkadot `BlockHashCount` = 2400, so + /// a + /// transaction with birth block number 1337 would be valid up until block number 1337 + 2400, + /// after which point the transaction would be considered to have an ancient birth block.) + AncientBirthBlock, + /// The transaction would exhaust the resources of current block. + /// + /// The transaction might be valid, but there are not enough resources + /// left in the current block. + ExhaustsResources, + /// Any other custom invalid validity that is not covered by this enum. + Custom(u8), + /// An extrinsic with a Mandatory dispatch resulted in Error. This is indicative of either a + /// malicious validator or a buggy `provide_inherent`. In any case, it can result in + /// dangerously overweight blocks and therefore if found, invalidates the block. + BadMandatory, + /// An extrinsic with a mandatory dispatch tried to be validated. + /// This is invalid; only inherent extrinsics are allowed to have mandatory dispatches. + MandatoryValidation, + /// The sending address is disabled or known to be invalid. + BadSigner, +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn transaction_validity_decoding_empty_bytes() { + // No panic should occur decoding empty bytes. 
+ let decoded = ValidationResult::try_from_bytes(vec![]); + assert!(decoded.is_err()) + } + + #[test] + fn transaction_validity_decoding_is_ok() { + use sp_runtime::transaction_validity as sp; + use sp_runtime::transaction_validity::TransactionValidity as T; + + let pairs = vec![ + ( + T::Ok(sp::ValidTransaction { + ..Default::default() + }), + ValidationResult::Valid(TransactionValid { + // By default, tx is immortal + longevity: u64::MAX, + // Default is true + propagate: true, + priority: 0, + provides: vec![], + requires: vec![], + }), + ), + ( + T::Err(sp::TransactionValidityError::Invalid( + sp::InvalidTransaction::BadProof, + )), + ValidationResult::Invalid(TransactionInvalid::BadProof), + ), + ( + T::Err(sp::TransactionValidityError::Invalid( + sp::InvalidTransaction::Call, + )), + ValidationResult::Invalid(TransactionInvalid::Call), + ), + ( + T::Err(sp::TransactionValidityError::Invalid( + sp::InvalidTransaction::Payment, + )), + ValidationResult::Invalid(TransactionInvalid::Payment), + ), + ( + T::Err(sp::TransactionValidityError::Invalid( + sp::InvalidTransaction::Future, + )), + ValidationResult::Invalid(TransactionInvalid::Future), + ), + ( + T::Err(sp::TransactionValidityError::Invalid( + sp::InvalidTransaction::Stale, + )), + ValidationResult::Invalid(TransactionInvalid::Stale), + ), + ( + T::Err(sp::TransactionValidityError::Invalid( + sp::InvalidTransaction::AncientBirthBlock, + )), + ValidationResult::Invalid(TransactionInvalid::AncientBirthBlock), + ), + ( + T::Err(sp::TransactionValidityError::Invalid( + sp::InvalidTransaction::ExhaustsResources, + )), + ValidationResult::Invalid(TransactionInvalid::ExhaustsResources), + ), + ( + T::Err(sp::TransactionValidityError::Invalid( + sp::InvalidTransaction::BadMandatory, + )), + ValidationResult::Invalid(TransactionInvalid::BadMandatory), + ), + ( + T::Err(sp::TransactionValidityError::Invalid( + sp::InvalidTransaction::MandatoryValidation, + )), + 
ValidationResult::Invalid(TransactionInvalid::MandatoryValidation), + ), + ( + T::Err(sp::TransactionValidityError::Invalid( + sp::InvalidTransaction::BadSigner, + )), + ValidationResult::Invalid(TransactionInvalid::BadSigner), + ), + ( + T::Err(sp::TransactionValidityError::Invalid( + sp::InvalidTransaction::Custom(123), + )), + ValidationResult::Invalid(TransactionInvalid::Custom(123)), + ), + ( + T::Err(sp::TransactionValidityError::Unknown( + sp::UnknownTransaction::CannotLookup, + )), + ValidationResult::Unknown(TransactionUnknown::CannotLookup), + ), + ( + T::Err(sp::TransactionValidityError::Unknown( + sp::UnknownTransaction::NoUnsignedValidator, + )), + ValidationResult::Unknown(TransactionUnknown::NoUnsignedValidator), + ), + ( + T::Err(sp::TransactionValidityError::Unknown( + sp::UnknownTransaction::Custom(123), + )), + ValidationResult::Unknown(TransactionUnknown::Custom(123)), + ), + ]; + + for (sp, validation_result) in pairs { + let encoded = sp.encode(); + let decoded = ValidationResult::try_from_bytes(encoded).expect("should decode OK"); + assert_eq!(decoded, validation_result); + } + } +} diff --git a/subxt/src/tx/tx_progress.rs b/subxt/src/tx/tx_progress.rs index a93d6fefe6..002dde15cd 100644 --- a/subxt/src/tx/tx_progress.rs +++ b/subxt/src/tx/tx_progress.rs @@ -8,24 +8,32 @@ use std::task::Poll; use crate::utils::strip_compact_prefix; use crate::{ + backend::{StreamOfResults, TransactionStatus as BackendTxStatus}, client::OnlineClientT, error::{DispatchError, Error, RpcError, TransactionError}, events::EventsClient, - rpc::types::{Subscription, SubstrateTxStatus}, Config, }; use derivative::Derivative; use futures::{Stream, StreamExt}; /// This struct represents a subscription to the progress of some transaction. 
-#[derive(Derivative)] -#[derivative(Debug(bound = "C: std::fmt::Debug"))] pub struct TxProgress { - sub: Option>>, + sub: Option>>, ext_hash: T::Hash, client: C, } +impl std::fmt::Debug for TxProgress { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TxProgress") + .field("sub", &"") + .field("ext_hash", &self.ext_hash) + .field("client", &"") + .finish() + } +} + // The above type is not `Unpin` by default unless the generic param `T` is, // so we manually make it clear that Unpin is actually fine regardless of `T` // (we don't care if this moves around in memory while it's "pinned"). @@ -34,7 +42,7 @@ impl Unpin for TxProgress {} impl TxProgress { /// Instantiate a new [`TxProgress`] from a custom subscription. pub fn new( - sub: Subscription>, + sub: StreamOfResults>, client: C, ext_hash: T::Hash, ) -> Self { @@ -59,8 +67,8 @@ where /// Return the next transaction status when it's emitted. This just delegates to the /// [`futures::Stream`] implementation for [`TxProgress`], but allows you to /// avoid importing that trait if you don't otherwise need it. - pub async fn next_item(&mut self) -> Option, Error>> { - self.next().await + pub async fn next(&mut self) -> Option, Error>> { + StreamExt::next(self).await } /// Wait for the transaction to be in a block (but not necessarily finalized), and return @@ -68,24 +76,25 @@ where /// waiting for this to happen. /// /// **Note:** consumes `self`. If you'd like to perform multiple actions as the state of the - /// transaction progresses, use [`TxProgress::next_item()`] instead. + /// transaction progresses, use [`TxProgress::next()`] instead. /// /// **Note:** transaction statuses like `Invalid`/`Usurped`/`Dropped` indicate with some /// probability that the transaction will not make it into a block but there is no guarantee /// that this is true. 
In those cases the stream is closed however, so you currently have no way to find /// out if they finally made it into a block or not. pub async fn wait_for_in_block(mut self) -> Result, Error> { - while let Some(status) = self.next_item().await { + while let Some(status) = self.next().await { match status? { // Finalized or otherwise in a block! Return. - TxStatus::InBlock(s) | TxStatus::Finalized(s) => return Ok(s), + TxStatus::InBestBlock(s) | TxStatus::InFinalizedBlock(s) => return Ok(s), // Error scenarios; return the error. - TxStatus::FinalityTimeout(_) => { - return Err(TransactionError::FinalityTimeout.into()); + TxStatus::Error { message } => return Err(TransactionError::Error(message).into()), + TxStatus::Invalid { message } => { + return Err(TransactionError::Invalid(message).into()) + } + TxStatus::Dropped { message } => { + return Err(TransactionError::Dropped(message).into()) } - TxStatus::Invalid => return Err(TransactionError::Invalid.into()), - TxStatus::Usurped(_) => return Err(TransactionError::Usurped.into()), - TxStatus::Dropped => return Err(TransactionError::Dropped.into()), // Ignore anything else and wait for next status event: _ => continue, } @@ -97,24 +106,25 @@ where /// instance when it is, or an error if there was a problem waiting for finalization. /// /// **Note:** consumes `self`. If you'd like to perform multiple actions as the state of the - /// transaction progresses, use [`TxProgress::next_item()`] instead. + /// transaction progresses, use [`TxProgress::next()`] instead. /// /// **Note:** transaction statuses like `Invalid`/`Usurped`/`Dropped` indicate with some /// probability that the transaction will not make it into a block but there is no guarantee /// that this is true. In those cases the stream is closed however, so you currently have no way to find /// out if they finally made it into a block or not. 
pub async fn wait_for_finalized(mut self) -> Result, Error> { - while let Some(status) = self.next_item().await { + while let Some(status) = self.next().await { match status? { // Finalized! Return. - TxStatus::Finalized(s) => return Ok(s), + TxStatus::InFinalizedBlock(s) => return Ok(s), // Error scenarios; return the error. - TxStatus::FinalityTimeout(_) => { - return Err(TransactionError::FinalityTimeout.into()); + TxStatus::Error { message } => return Err(TransactionError::Error(message).into()), + TxStatus::Invalid { message } => { + return Err(TransactionError::Invalid(message).into()) + } + TxStatus::Dropped { message } => { + return Err(TransactionError::Dropped(message).into()) } - TxStatus::Invalid => return Err(TransactionError::Invalid.into()), - TxStatus::Usurped(_) => return Err(TransactionError::Usurped.into()), - TxStatus::Dropped => return Err(TransactionError::Dropped.into()), // Ignore and wait for next status event: _ => continue, } @@ -127,7 +137,7 @@ where /// as well as a couple of other details (block hash and extrinsic hash). /// /// **Note:** consumes self. If you'd like to perform multiple actions as progress is made, - /// use [`TxProgress::next_item()`] instead. + /// use [`TxProgress::next()`] instead. 
/// /// **Note:** transaction statuses like `Invalid`/`Usurped`/`Dropped` indicate with some /// probability that the transaction will not make it into a block but there is no guarantee @@ -155,156 +165,84 @@ impl Stream for TxProgress { sub.poll_next_unpin(cx).map_ok(|status| { match status { - SubstrateTxStatus::Future => TxStatus::Future, - SubstrateTxStatus::Ready => TxStatus::Ready, - SubstrateTxStatus::Broadcast(peers) => TxStatus::Broadcast(peers), - SubstrateTxStatus::InBlock(hash) => { - TxStatus::InBlock(TxInBlock::new(hash, self.ext_hash, self.client.clone())) + BackendTxStatus::Validated => TxStatus::Validated, + BackendTxStatus::Broadcasted { num_peers } => TxStatus::Broadcasted { num_peers }, + BackendTxStatus::InBestBlock { hash } => { + TxStatus::InBestBlock(TxInBlock::new(hash, self.ext_hash, self.client.clone())) } - SubstrateTxStatus::Retracted(hash) => TxStatus::Retracted(hash), - // Only the following statuses are considered "final", in a sense that they end the stream (see the substrate - // docs on `TxStatus`): - // - // - Usurped - // - Finalized - // - FinalityTimeout - // - Invalid - // - Dropped - // - // Even though `Dropped`/`Invalid`/`Usurped` transactions might make it into a block eventually, - // the server considers them final and closes the connection, when they are encountered. - // In those cases the stream is closed however, so you currently have no way to find - // out if they finally made it into a block or not. - // - // As an example, a transaction that is `Invalid` on one node due to having the wrong - // nonce might still be valid on some fork on another node which ends up being finalized. - // Equally, a transaction `Dropped` from one node may still be in the transaction pool, - // and make it into a block, on another node. Likewise with `Usurped`. 
- SubstrateTxStatus::FinalityTimeout(hash) => { + // These stream events mean that nothing further will be sent: + BackendTxStatus::InFinalizedBlock { hash } => { self.sub = None; - TxStatus::FinalityTimeout(hash) + TxStatus::InFinalizedBlock(TxInBlock::new( + hash, + self.ext_hash, + self.client.clone(), + )) } - SubstrateTxStatus::Finalized(hash) => { + BackendTxStatus::Error { message } => { self.sub = None; - TxStatus::Finalized(TxInBlock::new(hash, self.ext_hash, self.client.clone())) + TxStatus::Error { message } } - SubstrateTxStatus::Usurped(hash) => { + BackendTxStatus::Invalid { message } => { self.sub = None; - TxStatus::Usurped(hash) + TxStatus::Invalid { message } } - SubstrateTxStatus::Dropped => { + BackendTxStatus::Dropped { message } => { self.sub = None; - TxStatus::Dropped - } - SubstrateTxStatus::Invalid => { - self.sub = None; - TxStatus::Invalid + TxStatus::Dropped { message } } } }) } } -//* Dev note: The below is adapted from the substrate docs on `TxStatus`, which this -//* enum was adapted from (and which is an exact copy of `SubstrateTxStatus` in this crate). -//* Note that the number of finality watchers is, at the time of writing, found in the constant -//* `MAX_FINALITY_WATCHERS` in the `sc_transaction_pool` crate. -//* -/// Possible transaction statuses returned from our [`TxProgress::next_item()`] call. -/// -/// These status events can be grouped based on their kinds as: -/// -/// 1. Entering/Moving within the pool: -/// - `Future` -/// - `Ready` -/// 2. Inside `Ready` queue: -/// - `Broadcast` -/// 3. Leaving the pool: -/// - `InBlock` -/// - `Invalid` -/// - `Usurped` -/// - `Dropped` -/// 4. Re-entering the pool: -/// - `Retracted` -/// 5. Block finalized: -/// - `Finalized` -/// - `FinalityTimeout` -/// -/// The events will always be received in the order described above, however -/// there might be cases where transactions alternate between `Future` and `Ready` -/// pool, and are `Broadcast` in the meantime. 
-/// -/// You are free to unsubscribe from notifications at any point. -/// The first one will be emitted when the block in which the transaction was included gets -/// finalized. The `FinalityTimeout` event will be emitted when the block did not reach finality -/// within 512 blocks. This either indicates that finality is not available for your chain, -/// or that finality gadget is lagging behind. -/// -/// Note that there are conditions that may cause transactions to reappear in the pool: -/// -/// 1. Due to possible forks, the transaction that ends up being included -/// in one block may later re-enter the pool or be marked as invalid. -/// 2. A transaction that is `Dropped` at one point may later re-enter the pool if -/// some other transactions are removed. -/// 3. `Invalid` transactions may become valid at some point in the future. -/// (Note that runtimes are encouraged to use `UnknownValidity` to inform the -/// pool about such cases). -/// 4. `Retracted` transactions might be included in a future block. -/// -/// Even though these cases can happen, the server-side of the stream is closed, if one of the following is encountered: -/// - Usurped -/// - Finalized -/// - FinalityTimeout -/// - Invalid -/// - Dropped -/// -/// In any of these cases the client side TxProgress stream is also closed. -/// In those cases the stream is closed however, so you currently have no way to find -/// out if they finally made it into a block or not. +/// Possible transaction statuses returned from our [`TxProgress::next()`] call. #[derive(Derivative)] #[derivative(Debug(bound = "C: std::fmt::Debug"))] pub enum TxStatus { - /// The transaction is part of the "future" queue. - Future, - /// The transaction is part of the "ready" queue. - Ready, - /// The transaction has been broadcast to the given peers. - Broadcast(Vec), - /// The transaction has been included in a block with given hash. 
- InBlock(TxInBlock), - /// The block this transaction was included in has been retracted, - /// probably because it did not make it onto the blocks which were - /// finalized. - Retracted(T::Hash), - /// A block containing the transaction did not reach finality within 512 - /// blocks, and so the subscription has ended. - FinalityTimeout(T::Hash), - /// The transaction has been finalized by a finality-gadget, e.g GRANDPA. - Finalized(TxInBlock), - /// The transaction has been replaced in the pool by another transaction - /// that provides the same tags. (e.g. same (sender, nonce)). - Usurped(T::Hash), - /// The transaction has been dropped from the pool because of the limit. - Dropped, - /// The transaction is no longer valid in the current state. - Invalid, + /// Transaction is part of the future queue. + Validated, + /// The transaction has been broadcast to other nodes. + Broadcasted { + /// Number of peers it's been broadcast to. + num_peers: u32, + }, + /// Transaction has been included in block with given hash. + InBestBlock(TxInBlock), + /// Transaction has been finalized by a finality-gadget, e.g GRANDPA + InFinalizedBlock(TxInBlock), + /// Something went wrong in the node. + Error { + /// Human readable message; what went wrong. + message: String, + }, + /// Transaction is invalid (bad nonce, signature etc). + Invalid { + /// Human readable message; why was it invalid. + message: String, + }, + /// The transaction was dropped. + Dropped { + /// Human readable message; why was it dropped. + message: String, + }, } impl TxStatus { - /// A convenience method to return the `Finalized` details. Returns - /// [`None`] if the enum variant is not [`TxStatus::Finalized`]. + /// A convenience method to return the finalized details. Returns + /// [`None`] if the enum variant is not [`TxStatus::InFinalizedBlock`]. 
pub fn as_finalized(&self) -> Option<&TxInBlock> { match self { - Self::Finalized(val) => Some(val), + Self::InFinalizedBlock(val) => Some(val), _ => None, } } - /// A convenience method to return the `InBlock` details. Returns - /// [`None`] if the enum variant is not [`TxStatus::InBlock`]. + /// A convenience method to return the best block details. Returns + /// [`None`] if the enum variant is not [`TxStatus::InBestBlock`]. pub fn as_in_block(&self) -> Option<&TxInBlock> { match self { - Self::InBlock(val) => Some(val), + Self::InBestBlock(val) => Some(val), _ => None, } } @@ -376,20 +314,18 @@ impl> TxInBlock { /// **Note:** This has to download block details from the node and decode events /// from them. pub async fn fetch_events(&self) -> Result, Error> { - let block = self + let block_body = self .client - .rpc() - .block(Some(self.block_hash)) + .backend() + .block_body(self.block_hash) .await? .ok_or(Error::Transaction(TransactionError::BlockNotFound))?; - let extrinsic_idx = block - .block - .extrinsics + let extrinsic_idx = block_body .iter() .position(|ext| { use crate::config::Hasher; - let Ok((_, stripped)) = strip_compact_prefix(&ext.0) else { + let Ok((_,stripped)) = strip_compact_prefix(ext) else { return false; }; let hash = T::Hasher::hash_of(&stripped); @@ -413,23 +349,16 @@ impl> TxInBlock { #[cfg(test)] mod test { - use std::pin::Pin; - - use futures::Stream; - use crate::{ + backend::{StreamOfResults, TransactionStatus}, client::{OfflineClientT, OnlineClientT}, - error::RpcError, - rpc::{types::SubstrateTxStatus, RpcSubscription, Subscription}, tx::TxProgress, Config, Error, SubstrateConfig, }; - use serde_json::value::RawValue; - type MockTxProgress = TxProgress; type MockHash = ::Hash; - type MockSubstrateTxStatus = SubstrateTxStatus; + type MockSubstrateTxStatus = TransactionStatus; /// a mock client to satisfy trait bounds in tests #[derive(Clone, Debug)] @@ -444,49 +373,59 @@ mod test { unimplemented!("just a mock impl to satisfy trait 
bounds") } - fn runtime_version(&self) -> crate::rpc::types::RuntimeVersion { + fn runtime_version(&self) -> crate::backend::RuntimeVersion { unimplemented!("just a mock impl to satisfy trait bounds") } } impl OnlineClientT for MockClient { - fn rpc(&self) -> &crate::rpc::Rpc { + fn backend(&self) -> &dyn crate::backend::Backend { unimplemented!("just a mock impl to satisfy trait bounds") } } #[tokio::test] - async fn wait_for_finalized_returns_err_when_usurped() { + async fn wait_for_finalized_returns_err_when_error() { let tx_progress = mock_tx_progress(vec![ - SubstrateTxStatus::Ready, - SubstrateTxStatus::Usurped(Default::default()), + MockSubstrateTxStatus::Broadcasted { num_peers: 2 }, + MockSubstrateTxStatus::Error { + message: "err".into(), + }, ]); let finalized_result = tx_progress.wait_for_finalized().await; assert!(matches!( finalized_result, - Err(Error::Transaction(crate::error::TransactionError::Usurped)) - )); - } - - #[tokio::test] - async fn wait_for_finalized_returns_err_when_dropped() { - let tx_progress = - mock_tx_progress(vec![SubstrateTxStatus::Ready, SubstrateTxStatus::Dropped]); - let finalized_result = tx_progress.wait_for_finalized().await; - assert!(matches!( - finalized_result, - Err(Error::Transaction(crate::error::TransactionError::Dropped)) + Err(Error::Transaction(crate::error::TransactionError::Error(e))) if e == "err" )); } #[tokio::test] async fn wait_for_finalized_returns_err_when_invalid() { - let tx_progress = - mock_tx_progress(vec![SubstrateTxStatus::Ready, SubstrateTxStatus::Invalid]); + let tx_progress = mock_tx_progress(vec![ + MockSubstrateTxStatus::Broadcasted { num_peers: 2 }, + MockSubstrateTxStatus::Invalid { + message: "err".into(), + }, + ]); let finalized_result = tx_progress.wait_for_finalized().await; assert!(matches!( finalized_result, - Err(Error::Transaction(crate::error::TransactionError::Invalid)) + Err(Error::Transaction(crate::error::TransactionError::Invalid(e))) if e == "err" + )); + } + + 
#[tokio::test] + async fn wait_for_finalized_returns_err_when_dropped() { + let tx_progress = mock_tx_progress(vec![ + MockSubstrateTxStatus::Broadcasted { num_peers: 2 }, + MockSubstrateTxStatus::Dropped { + message: "err".into(), + }, + ]); + let finalized_result = tx_progress.wait_for_finalized().await; + assert!(matches!( + finalized_result, + Err(Error::Transaction(crate::error::TransactionError::Dropped(e))) if e == "err" )); } @@ -497,21 +436,10 @@ mod test { fn create_substrate_tx_status_subscription( elements: Vec, - ) -> Subscription { - let rpc_substription_stream: Pin< - Box, RpcError>> + Send + 'static>, - > = Box::pin(futures::stream::iter(elements.into_iter().map(|e| { - let s = serde_json::to_string(&e).unwrap(); - let r: Box = RawValue::from_string(s).unwrap(); - Ok(r) - }))); - - let rpc_subscription: RpcSubscription = RpcSubscription { - stream: rpc_substription_stream, - id: None, - }; - - let sub: Subscription = Subscription::new(rpc_subscription); + ) -> StreamOfResults { + let results = elements.into_iter().map(Ok); + let stream = Box::pin(futures::stream::iter(results)); + let sub: StreamOfResults = StreamOfResults::new(stream); sub } } diff --git a/testing/integration-tests/Cargo.toml b/testing/integration-tests/Cargo.toml index 868763cbd5..099b93382e 100644 --- a/testing/integration-tests/Cargo.toml +++ b/testing/integration-tests/Cargo.toml @@ -38,4 +38,3 @@ tracing = { workspace = true } tracing-subscriber = { workspace = true } wabt = { workspace = true } substrate-runner = { workspace = true } -sp-runtime = { workspace = true } diff --git a/testing/integration-tests/src/full_client/blocks/mod.rs b/testing/integration-tests/src/full_client/blocks/mod.rs index 141cbf09e8..ff473a7574 100644 --- a/testing/integration-tests/src/full_client/blocks/mod.rs +++ b/testing/integration-tests/src/full_client/blocks/mod.rs @@ -22,7 +22,7 @@ async fn non_finalized_headers_subscription() -> Result<(), subxt::Error> { // (this can be a bit slow as we 
have to wait for finalization) let header = sub.next().await.unwrap()?; let block_hash = header.hash(); - let current_block_hash = api.rpc().block_hash(None).await?.unwrap(); + let current_block_hash = api.backend().latest_best_block_ref().await?.hash(); assert_eq!(block_hash, current_block_hash); Ok(()) @@ -40,7 +40,7 @@ async fn finalized_headers_subscription() -> Result<(), subxt::Error> { // associated block hash is the one we just finalized. // (this can be a bit slow as we have to wait for finalization) let header = sub.next().await.unwrap()?; - let finalized_hash = api.rpc().finalized_head().await?; + let finalized_hash = api.backend().latest_finalized_block_ref().await?.hash(); assert_eq!(header.hash(), finalized_hash); Ok(()) @@ -48,15 +48,16 @@ async fn finalized_headers_subscription() -> Result<(), subxt::Error> { #[tokio::test] async fn missing_block_headers_will_be_filled_in() -> Result<(), subxt::Error> { + use subxt::backend::legacy; + let ctx = test_context().await; - let api = ctx.client(); + let rpc = ctx.legacy_rpc_methods().await; // Manually subscribe to the next 6 finalized block headers, but deliberately // filter out some in the middle so we get back b _ _ b _ b. This guarantees // that there will be some gaps, even if there aren't any from the subscription. - let some_finalized_blocks = api - .rpc() - .subscribe_finalized_block_headers() + let some_finalized_blocks = rpc + .chain_subscribe_finalized_heads() .await? .enumerate() .take(6) @@ -64,14 +65,11 @@ async fn missing_block_headers_will_be_filled_in() -> Result<(), subxt::Error> { let n = *n; async move { n == 0 || n == 3 || n == 5 } }) - .map(|(_, h)| h); + .map(|(_, r)| r); // This should spot any gaps in the middle and fill them back in. 
- let all_finalized_blocks = subxt::blocks::subscribe_to_block_headers_filling_in_gaps( - ctx.client(), - None, - some_finalized_blocks, - ); + let all_finalized_blocks = + legacy::subscribe_to_block_headers_filling_in_gaps(rpc, some_finalized_blocks, None); futures::pin_mut!(all_finalized_blocks); // Iterate the block headers, making sure we get them all in order. @@ -97,6 +95,7 @@ async fn missing_block_headers_will_be_filled_in() -> Result<(), subxt::Error> { async fn runtime_api_call() -> Result<(), subxt::Error> { let ctx = test_context().await; let api = ctx.client(); + let rpc = ctx.legacy_rpc_methods().await; let mut sub = api.blocks().subscribe_best().await?; @@ -109,7 +108,7 @@ async fn runtime_api_call() -> Result<(), subxt::Error> { .await?; // get metadata via `state_getMetadata`. - let meta2 = api.rpc().metadata_legacy(None).await?; + let meta2 = rpc.state_get_metadata(Some(block.hash())).await?; // They should be the same. assert_eq!(meta1.encode(), meta2.encode()); @@ -147,7 +146,7 @@ async fn decode_extrinsics() { let block_hash = in_block.block_hash(); let block = BlocksClient::new(api).at(block_hash).await.unwrap(); - let extrinsics = block.body().await.unwrap().extrinsics(); + let extrinsics = block.extrinsics().await.unwrap(); assert_eq!(extrinsics.len(), 2); assert_eq!(extrinsics.block_hash(), block_hash); diff --git a/testing/integration-tests/src/full_client/client/legacy_rpcs.rs b/testing/integration-tests/src/full_client/client/legacy_rpcs.rs new file mode 100644 index 0000000000..343079d14b --- /dev/null +++ b/testing/integration-tests/src/full_client/client/legacy_rpcs.rs @@ -0,0 +1,127 @@ +// Copyright 2019-2023 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Just sanity checking some of the legacy RPC methods to make +//! sure they don't error out and can decode their results OK. 
+ +use crate::test_context; + +#[tokio::test] +async fn chain_get_block_hash() { + let ctx = test_context().await; + let rpc = ctx.legacy_rpc_methods().await; + + rpc.chain_get_block_hash(None).await.unwrap(); +} + +#[tokio::test] +async fn chain_get_block() { + let ctx = test_context().await; + let rpc = ctx.legacy_rpc_methods().await; + + let hash = rpc.chain_get_block_hash(None).await.unwrap(); + rpc.chain_get_block(hash).await.unwrap(); +} + +#[tokio::test] +async fn chain_get_finalized_head() { + let ctx = test_context().await; + let rpc = ctx.legacy_rpc_methods().await; + + rpc.chain_get_finalized_head().await.unwrap(); +} + +#[tokio::test] +async fn chain_subscribe_all_heads() { + let ctx = test_context().await; + let rpc = ctx.legacy_rpc_methods().await; + + let mut sub = rpc.chain_subscribe_all_heads().await.unwrap(); + let _block_header = sub.next().await.unwrap().unwrap(); +} + +#[tokio::test] +async fn chain_subscribe_finalized_heads() { + let ctx = test_context().await; + let rpc = ctx.legacy_rpc_methods().await; + + let mut sub = rpc.chain_subscribe_finalized_heads().await.unwrap(); + let _block_header = sub.next().await.unwrap().unwrap(); +} + +#[tokio::test] +async fn chain_subscribe_new_heads() { + let ctx = test_context().await; + let rpc = ctx.legacy_rpc_methods().await; + + let mut sub = rpc.chain_subscribe_new_heads().await.unwrap(); + let _block_header = sub.next().await.unwrap().unwrap(); +} + +#[tokio::test] +async fn genesis_hash() { + let ctx = test_context().await; + let rpc = ctx.legacy_rpc_methods().await; + + let _genesis_hash = rpc.genesis_hash().await.unwrap(); +} + +#[tokio::test] +async fn state_get_metadata() { + let ctx = test_context().await; + let rpc = ctx.legacy_rpc_methods().await; + + let _metadata = rpc.state_get_metadata(None).await.unwrap(); +} + +#[tokio::test] +async fn state_call() { + let ctx = test_context().await; + let rpc = ctx.legacy_rpc_methods().await; + + let _metadata = rpc + .state_call("Metadata_metadata", 
None, None) + .await + .unwrap(); +} + +#[tokio::test] +async fn system_health() { + let ctx = test_context().await; + let rpc = ctx.legacy_rpc_methods().await; + + let _ = rpc.system_health().await.unwrap(); +} + +#[tokio::test] +async fn system_chain() { + let ctx = test_context().await; + let rpc = ctx.legacy_rpc_methods().await; + + let _ = rpc.system_chain().await.unwrap(); +} + +#[tokio::test] +async fn system_name() { + let ctx = test_context().await; + let rpc = ctx.legacy_rpc_methods().await; + + let _ = rpc.system_name().await.unwrap(); +} + +#[tokio::test] +async fn system_version() { + let ctx = test_context().await; + let rpc = ctx.legacy_rpc_methods().await; + + let _ = rpc.system_version().await.unwrap(); +} + +#[tokio::test] +async fn system_properties() { + let ctx = test_context().await; + let rpc = ctx.legacy_rpc_methods().await; + + let _ = rpc.system_properties().await.unwrap(); +} diff --git a/testing/integration-tests/src/full_client/client/mod.rs b/testing/integration-tests/src/full_client/client/mod.rs index 9c6886f62b..d30b4bdb3d 100644 --- a/testing/integration-tests/src/full_client/client/mod.rs +++ b/testing/integration-tests/src/full_client/client/mod.rs @@ -3,181 +3,64 @@ // see LICENSE for license details. 
use crate::{ - test_context, test_context_with, + test_context, utils::{node_runtime, wait_for_blocks}, }; -use assert_matches::assert_matches; -use codec::{Compact, Decode, Encode}; -use sp_core::storage::well_known_keys; -use sp_runtime::DeserializeOwned; +use codec::{Decode, Encode}; +use futures::StreamExt; use subxt::{ - error::{DispatchError, Error, TokenError}, - rpc::{ - types::{ - DryRunResult, DryRunResultBytes, FollowEvent, Initialized, MethodResponse, - RuntimeEvent, RuntimeVersionEvent, StorageQuery, StorageQueryType, - }, - Subscription, - }, - utils::AccountId32, + backend::BackendExt, + error::{DispatchError, Error}, + tx::{TransactionInvalid, ValidationResult}, }; -use subxt_metadata::Metadata; use subxt_signer::sr25519::dev; -/// Ignore block related events and obtain the next event related to an operation. -async fn next_operation_event( - sub: &mut Subscription>, -) -> FollowEvent { - // At most 5 retries. - for _ in 0..5 { - let event = sub.next().await.unwrap().unwrap(); - - match event { - // Can also return the `Stop` event for better debugging. 
- FollowEvent::Initialized(_) - | FollowEvent::NewBlock(_) - | FollowEvent::BestBlockChanged(_) - | FollowEvent::Finalized(_) => continue, - _ => (), - }; - - return event; - } - - panic!("Cannot find operation related event after 5 produced events"); -} +mod legacy_rpcs; #[tokio::test] -async fn insert_key() { - let ctx = test_context_with("bob".to_string()).await; - let api = ctx.client(); - - let public = dev::alice().public_key().as_ref().to_vec(); - api.rpc() - .insert_key( - "aura".to_string(), - "//Alice".to_string(), - public.clone().into(), - ) - .await - .unwrap(); - assert!(api - .rpc() - .has_key(public.clone().into(), "aura".to_string()) - .await - .unwrap()); -} - -#[tokio::test] -async fn fetch_block_hash() { - let ctx = test_context().await; - ctx.client().rpc().block_hash(None).await.unwrap(); -} - -#[tokio::test] -async fn fetch_block() { - let ctx = test_context().await; - let api = ctx.client(); - - let block_hash = api.rpc().block_hash(None).await.unwrap(); - api.rpc().block(block_hash).await.unwrap(); -} - -#[tokio::test] -async fn fetch_read_proof() { - let ctx = test_context().await; - let api = ctx.client(); - - let block_hash = api.rpc().block_hash(None).await.unwrap(); - api.rpc() - .read_proof( - vec![ - well_known_keys::HEAP_PAGES, - well_known_keys::EXTRINSIC_INDEX, - ], - block_hash, - ) - .await - .unwrap(); -} - -#[tokio::test] -async fn chain_subscribe_all_blocks() { - let ctx = test_context().await; - let api = ctx.client(); - - let mut blocks = api.rpc().subscribe_all_block_headers().await.unwrap(); - blocks.next().await.unwrap().unwrap(); -} - -#[tokio::test] -async fn chain_subscribe_best_blocks() { - let ctx = test_context().await; - let api = ctx.client(); - - let mut blocks = api.rpc().subscribe_best_block_headers().await.unwrap(); - blocks.next().await.unwrap().unwrap(); -} - -#[tokio::test] -async fn chain_subscribe_finalized_blocks() { - let ctx = test_context().await; - let api = ctx.client(); - - let mut blocks = 
api.rpc().subscribe_finalized_block_headers().await.unwrap(); - blocks.next().await.unwrap().unwrap(); -} - -#[tokio::test] -async fn fetch_keys() { +async fn storage_fetch_raw_keys() { let ctx = test_context().await; let api = ctx.client(); let addr = node_runtime::storage().system().account_iter(); - let keys = api + let len = api .storage() .at_latest() .await .unwrap() - .fetch_keys(&addr.to_root_bytes(), 4, None) + .fetch_raw_keys(addr.to_root_bytes()) .await - .unwrap(); - assert_eq!(keys.len(), 4) + .unwrap() + .filter_map(|r| async move { r.ok() }) + .count() + .await; + + assert_eq!(len, 13) } #[tokio::test] -async fn test_iter() { +async fn storage_iter() { let ctx = test_context().await; let api = ctx.client(); let addr = node_runtime::storage().system().account_iter(); - let mut iter = api + let len = api .storage() .at_latest() .await .unwrap() - .iter(addr, 10) + .iter(addr) .await - .unwrap(); - let mut i = 0; - while iter.next().await.unwrap().is_some() { - i += 1; - } - assert_eq!(i, 13); + .unwrap() + .filter_map(|r| async move { r.ok() }) + .count() + .await; + + assert_eq!(len, 13); } #[tokio::test] -async fn fetch_system_info() { - let ctx = test_context().await; - let api = ctx.client(); - - assert_eq!(api.rpc().system_chain().await.unwrap(), "Development"); - assert_eq!(api.rpc().system_name().await.unwrap(), "Substrate Node"); - assert!(!api.rpc().system_version().await.unwrap().is_empty()); -} - -#[tokio::test] -async fn dry_run_passes() { +async fn transaction_validation() { let ctx = test_context().await; let api = ctx.client(); @@ -197,9 +80,9 @@ async fn dry_run_passes() { .unwrap(); signed_extrinsic - .dry_run(None) + .validate() .await - .expect("dryrunning failed"); + .expect("validation failed"); signed_extrinsic .submit_and_watch() @@ -211,109 +94,37 @@ async fn dry_run_passes() { } #[tokio::test] -async fn dry_run_fails() { +async fn validation_fails() { + use std::str::FromStr; + use subxt_signer::{sr25519::Keypair, SecretUri}; + 
let ctx = test_context().await; let api = ctx.client(); wait_for_blocks(&api).await; - let alice = dev::alice(); - let bob = dev::bob(); + let from = Keypair::from_uri(&SecretUri::from_str("//AccountWithNoFunds").unwrap()).unwrap(); + let to = dev::bob(); - let tx = node_runtime::tx().balances().transfer( - bob.public_key().into(), - // 7 more than the default amount Alice has, so this should fail; insufficient funds: - 1_000_000_000_000_000_000_007, - ); + // The actual TX is not important; the account has no funds to pay for it. + let tx = node_runtime::tx() + .balances() + .transfer(to.public_key().into(), 1); let signed_extrinsic = api .tx() - .create_signed(&tx, &alice, Default::default()) + .create_signed(&tx, &from, Default::default()) .await .unwrap(); - let dry_run_res = signed_extrinsic - .dry_run(None) + let validation_res = signed_extrinsic + .validate() .await .expect("dryrunning failed"); - assert_eq!( - dry_run_res, - DryRunResult::DispatchError(DispatchError::Token(TokenError::FundsUnavailable)) + validation_res, + ValidationResult::Invalid(TransactionInvalid::Payment) ); - - let res = signed_extrinsic - .submit_and_watch() - .await - .unwrap() - .wait_for_finalized_success() - .await; - - assert!( - matches!( - res, - Err(Error::Runtime(DispatchError::Token( - TokenError::FundsUnavailable - ))) - ), - "Expected an insufficient balance, got {res:?}" - ); -} - -#[tokio::test] -async fn dry_run_result_is_substrate_compatible() { - use sp_runtime::{ - transaction_validity::{ - InvalidTransaction as SpInvalidTransaction, - TransactionValidityError as SpTransactionValidityError, - }, - ApplyExtrinsicResult as SpApplyExtrinsicResult, DispatchError as SpDispatchError, - TokenError as SpTokenError, - }; - - // We really just connect to a node to get some valid metadata to help us - // decode Dispatch Errors. 
- let ctx = test_context().await; - let api = ctx.client(); - - let pairs = vec![ - // All ok - (SpApplyExtrinsicResult::Ok(Ok(())), DryRunResult::Success), - // Some transaction error - ( - SpApplyExtrinsicResult::Err(SpTransactionValidityError::Invalid( - SpInvalidTransaction::BadProof, - )), - DryRunResult::TransactionValidityError, - ), - // Some dispatch errors to check that they decode OK. We've tested module errors - // "in situ" in other places so avoid the complexity of testing them properly here. - ( - SpApplyExtrinsicResult::Ok(Err(SpDispatchError::Other("hi"))), - DryRunResult::DispatchError(DispatchError::Other), - ), - ( - SpApplyExtrinsicResult::Ok(Err(SpDispatchError::CannotLookup)), - DryRunResult::DispatchError(DispatchError::CannotLookup), - ), - ( - SpApplyExtrinsicResult::Ok(Err(SpDispatchError::BadOrigin)), - DryRunResult::DispatchError(DispatchError::BadOrigin), - ), - ( - SpApplyExtrinsicResult::Ok(Err(SpDispatchError::Token(SpTokenError::CannotCreate))), - DryRunResult::DispatchError(DispatchError::Token(TokenError::CannotCreate)), - ), - ]; - - for (actual, expected) in pairs { - let encoded = actual.encode(); - let res = DryRunResultBytes(encoded) - .into_dry_run_result(&api.metadata()) - .unwrap(); - - assert_eq!(res, expected); - } } #[tokio::test] @@ -442,227 +253,28 @@ async fn unsigned_extrinsic_is_same_shape_as_polkadotjs() { } #[tokio::test] -async fn rpc_state_call() -> Result<(), subxt::Error> { +async fn extrinsic_hash_is_same_as_returned() { let ctx = test_context().await; let api = ctx.client(); + let rpc = ctx.legacy_rpc_methods().await; - // get metadata via state_call. - let (_, meta1) = api - .rpc() - .state_call::<(Compact, Metadata)>("Metadata_metadata", None, None) - .await?; + let payload = node_runtime::tx() + .balances() + .transfer(dev::alice().public_key().into(), 12345000000000000); - // get metadata via `state_getMetadata`. - let meta2 = api.rpc().metadata_legacy(None).await?; - - // They should be the same. 
- assert_eq!(meta1.encode(), meta2.encode()); - - Ok(()) -} - -#[tokio::test] -async fn chainhead_unstable_follow() { - let ctx = test_context().await; - let api = ctx.client(); - - // Check subscription with runtime updates set on false. - let mut blocks = api.rpc().chainhead_unstable_follow(false).await.unwrap(); - let event = blocks.next().await.unwrap().unwrap(); - // The initialized event should contain the finalized block hash. - let finalized_block_hash = api.rpc().finalized_head().await.unwrap(); - assert_eq!( - event, - FollowEvent::Initialized(Initialized { - finalized_block_hash, - finalized_block_runtime: None, - }) - ); - - // Expect subscription to produce runtime versions. - let mut blocks = api.rpc().chainhead_unstable_follow(true).await.unwrap(); - let event = blocks.next().await.unwrap().unwrap(); - // The initialized event should contain the finalized block hash. - let finalized_block_hash = api.rpc().finalized_head().await.unwrap(); - let runtime_version = ctx.client().runtime_version(); - - assert_matches!( - event, - FollowEvent::Initialized(init) => { - assert_eq!(init.finalized_block_hash, finalized_block_hash); - assert_eq!(init.finalized_block_runtime, Some(RuntimeEvent::Valid(RuntimeVersionEvent { - spec: runtime_version, - }))); - } - ); -} - -#[tokio::test] -async fn chainhead_unstable_body() { - let ctx = test_context().await; - let api = ctx.client(); - - let mut blocks = api.rpc().chainhead_unstable_follow(false).await.unwrap(); - let event = blocks.next().await.unwrap().unwrap(); - let hash = match event { - FollowEvent::Initialized(init) => init.finalized_block_hash, - _ => panic!("Unexpected event"), - }; - let sub_id = blocks.subscription_id().unwrap().clone(); - - // Fetch the block's body. 
- let response = api - .rpc() - .chainhead_unstable_body(sub_id, hash) + let tx = api + .tx() + .create_signed(&payload, &dev::bob(), Default::default()) .await .unwrap(); - let operation_id = match response { - MethodResponse::Started(started) => started.operation_id, - MethodResponse::LimitReached => panic!("Expected started response"), - }; - // Response propagated to `chainHead_follow`. - let event = next_operation_event(&mut blocks).await; - assert_matches!( - event, - FollowEvent::OperationBodyDone(done) if done.operation_id == operation_id - ); -} + // 1. Calculate the hash locally: + let local_hash = tx.hash(); -#[tokio::test] -async fn chainhead_unstable_header() { - let ctx = test_context().await; - let api = ctx.client(); + // 2. Submit and get the hash back from the node: + let external_hash = rpc.author_submit_extrinsic(tx.encoded()).await.unwrap(); - let mut blocks = api.rpc().chainhead_unstable_follow(false).await.unwrap(); - let event = blocks.next().await.unwrap().unwrap(); - let hash = match event { - FollowEvent::Initialized(init) => init.finalized_block_hash, - _ => panic!("Unexpected event"), - }; - let sub_id = blocks.subscription_id().unwrap().clone(); - - let header = api.rpc().header(Some(hash)).await.unwrap().unwrap(); - let expected = format!("0x{}", hex::encode(header.encode())); - - let header = api - .rpc() - .chainhead_unstable_header(sub_id, hash) - .await - .unwrap() - .unwrap(); - - assert_eq!(header, expected); -} - -#[tokio::test] -async fn chainhead_unstable_storage() { - let ctx = test_context().await; - let api = ctx.client(); - - let mut blocks = api.rpc().chainhead_unstable_follow(false).await.unwrap(); - let event = blocks.next().await.unwrap().unwrap(); - let hash = match event { - FollowEvent::Initialized(init) => init.finalized_block_hash, - _ => panic!("Unexpected event"), - }; - let sub_id = blocks.subscription_id().unwrap().clone(); - - let alice: AccountId32 = dev::alice().public_key().into(); - let addr = 
node_runtime::storage().system().account(alice); - let addr_bytes = api.storage().address_bytes(&addr).unwrap(); - - let items = vec![StorageQuery { - key: addr_bytes.as_slice(), - query_type: StorageQueryType::Value, - }]; - - // Fetch storage. - let response = api - .rpc() - .chainhead_unstable_storage(sub_id, hash, items, None) - .await - .unwrap(); - let operation_id = match response { - MethodResponse::Started(started) => started.operation_id, - MethodResponse::LimitReached => panic!("Expected started response"), - }; - - // Response propagated to `chainHead_follow`. - let event = next_operation_event(&mut blocks).await; - assert_matches!( - event, - FollowEvent::OperationStorageItems(res) if res.operation_id == operation_id && - res.items.len() == 1 && - res.items[0].key == format!("0x{}", hex::encode(addr_bytes)) - ); - - let event = next_operation_event(&mut blocks).await; - assert_matches!(event, FollowEvent::OperationStorageDone(res) if res.operation_id == operation_id); -} - -#[tokio::test] -async fn chainhead_unstable_call() { - let ctx = test_context().await; - let api = ctx.client(); - - let mut blocks = api.rpc().chainhead_unstable_follow(true).await.unwrap(); - let event = blocks.next().await.unwrap().unwrap(); - let hash = match event { - FollowEvent::Initialized(init) => init.finalized_block_hash, - _ => panic!("Unexpected event"), - }; - let sub_id = blocks.subscription_id().unwrap().clone(); - - let alice_id = dev::alice().public_key().to_account_id(); - // Runtime API call. - let response = api - .rpc() - .chainhead_unstable_call( - sub_id, - hash, - "AccountNonceApi_account_nonce".into(), - &alice_id.encode(), - ) - .await - .unwrap(); - let operation_id = match response { - MethodResponse::Started(started) => started.operation_id, - MethodResponse::LimitReached => panic!("Expected started response"), - }; - - // Response propagated to `chainHead_follow`. 
- let event = next_operation_event(&mut blocks).await; - assert_matches!( - event, - FollowEvent::OperationCallDone(res) if res.operation_id == operation_id - ); -} - -#[tokio::test] -async fn chainhead_unstable_unpin() { - let ctx = test_context().await; - let api = ctx.client(); - - let mut blocks = api.rpc().chainhead_unstable_follow(true).await.unwrap(); - let event = blocks.next().await.unwrap().unwrap(); - let hash = match event { - FollowEvent::Initialized(init) => init.finalized_block_hash, - _ => panic!("Unexpected event"), - }; - let sub_id = blocks.subscription_id().unwrap().clone(); - - assert!(api - .rpc() - .chainhead_unstable_unpin(sub_id.clone(), hash) - .await - .is_ok()); - // The block was already unpinned. - assert!(api - .rpc() - .chainhead_unstable_unpin(sub_id, hash) - .await - .is_err()); + assert_eq!(local_hash, external_hash); } /// taken from original type @@ -713,6 +325,7 @@ async fn partial_fee_estimate_correct() { let partial_fee_1 = signed_extrinsic.partial_fee_estimate().await.unwrap(); // Method II: TransactionPaymentApi_query_fee_details + calculations + let latest_block_ref = api.backend().latest_best_block_ref().await.unwrap(); let len_bytes: [u8; 4] = (signed_extrinsic.encoded().len() as u32).to_le_bytes(); let encoded_with_len = [signed_extrinsic.encoded(), &len_bytes[..]].concat(); let InclusionFee { @@ -720,11 +333,11 @@ async fn partial_fee_estimate_correct() { len_fee, adjusted_weight_fee, } = api - .rpc() - .state_call::( + .backend() + .call_decoding::( "TransactionPaymentApi_query_fee_details", Some(&encoded_with_len), - None, + latest_block_ref.hash(), ) .await .unwrap() diff --git a/testing/integration-tests/src/full_client/frame/contracts.rs b/testing/integration-tests/src/full_client/frame/contracts.rs index 47d3bea9e0..fcd0ca9663 100644 --- a/testing/integration-tests/src/full_client/frame/contracts.rs +++ b/testing/integration-tests/src/full_client/frame/contracts.rs @@ -11,6 +11,7 @@ use crate::{ }, test_context, 
TestContext, }; +use subxt::ext::futures::StreamExt; use subxt::{tx::TxProgress, utils::MultiAddress, Config, Error, OnlineClient, SubstrateConfig}; use subxt_signer::sr25519::{self, dev}; @@ -202,8 +203,7 @@ async fn tx_call() { let info_addr = node_runtime::storage() .contracts() .contract_info_of(&contract); - - let info_addr_bytes = cxt.client().storage().address_bytes(&info_addr).unwrap(); + let info_addr_iter = node_runtime::storage().contracts().contract_info_of_iter(); let contract_info = cxt .client() @@ -215,19 +215,20 @@ async fn tx_call() { .await; assert!(contract_info.is_ok()); - let keys = cxt + let keys_and_values = cxt .client() .storage() .at_latest() .await .unwrap() - .fetch_keys(&info_addr_bytes, 10, None) + .iter(info_addr_iter) .await .unwrap() - .iter() - .map(|key| hex::encode(&key.0)) - .collect::>(); - println!("keys post: {keys:?}"); + .collect::>() + .await; + + assert_eq!(keys_and_values.len(), 1); + println!("keys+values post: {keys_and_values:?}"); let executed = cxt.call(contract, vec![]).await; diff --git a/testing/integration-tests/src/lib.rs b/testing/integration-tests/src/lib.rs index 90b024b4bb..8c13448893 100644 --- a/testing/integration-tests/src/lib.rs +++ b/testing/integration-tests/src/lib.rs @@ -21,6 +21,8 @@ mod light_client; use test_runtime::node_runtime; // These dependencies are used for the full client. +#[cfg(all(test, feature = "unstable-light-client"))] +use futures as _; #[cfg(all(test, not(feature = "unstable-light-client")))] use regex as _; #[cfg(all(test, not(feature = "unstable-light-client")))] diff --git a/testing/integration-tests/src/light_client/mod.rs b/testing/integration-tests/src/light_client/mod.rs index 8d446e140d..6579940dc5 100644 --- a/testing/integration-tests/src/light_client/mod.rs +++ b/testing/integration-tests/src/light_client/mod.rs @@ -28,12 +28,10 @@ //! 
use crate::utils::node_runtime; -use codec::{Compact, Encode}; -use futures::StreamExt; +use codec::Compact; use subxt::{ client::{LightClient, LightClientBuilder, OnlineClientT}, config::PolkadotConfig, - rpc::types::FollowEvent, }; use subxt_metadata::Metadata; @@ -44,7 +42,6 @@ use hex as _; use regex as _; use scale_info as _; use sp_core as _; -use sp_runtime as _; use subxt_codegen as _; use subxt_signer as _; use syn as _; @@ -58,7 +55,7 @@ async fn non_finalized_headers_subscription(api: &Client) -> Result<(), subxt::E let mut sub = api.blocks().subscribe_best().await?; let header = sub.next().await.unwrap()?; let block_hash = header.hash(); - let current_block_hash = api.rpc().block_hash(None).await?.unwrap(); + let current_block_hash = api.backend().latest_best_block_ref().await.unwrap().hash(); assert_eq!(block_hash, current_block_hash); @@ -73,7 +70,12 @@ async fn non_finalized_headers_subscription(api: &Client) -> Result<(), subxt::E async fn finalized_headers_subscription(api: &Client) -> Result<(), subxt::Error> { let mut sub = api.blocks().subscribe_finalized().await?; let header = sub.next().await.unwrap()?; - let finalized_hash = api.rpc().finalized_head().await?; + let finalized_hash = api + .backend() + .latest_finalized_block_ref() + .await + .unwrap() + .hash(); assert_eq!(header.hash(), finalized_hash); @@ -91,17 +93,11 @@ async fn runtime_api_call(api: &Client) -> Result<(), subxt::Error> { let block = sub.next().await.unwrap()?; let rt = block.runtime_api().await?; - // get metadata via state_call. - let (_, meta1) = rt + // get metadata via state_call. if it decodes ok, it's probably all good. + let _ = rt .call_raw::<(Compact, Metadata)>("Metadata_metadata", None) .await?; - // get metadata via `state_getMetadata`. - let meta2 = api.rpc().metadata_legacy(None).await?; - - // They should be the same. 
- assert_eq!(meta1.encode(), meta2.encode()); - Ok(()) } @@ -114,42 +110,12 @@ async fn storage_plain_lookup(api: &Client) -> Result<(), subxt::Error> { .await? .fetch_or_default(&addr) .await?; + assert!(entry > 0); Ok(()) } -// Subscribe to produced blocks using the `ChainHead` spec V2 and fetch the header of -// just a few reported blocks. -async fn follow_chain_head(api: &Client) -> Result<(), subxt::Error> { - let mut blocks = api.rpc().chainhead_unstable_follow(false).await?; - let sub_id = blocks - .subscription_id() - .expect("RPC provides a valid subscription id; qed") - .to_owned(); - - let event = blocks.next().await.unwrap()?; - if let FollowEvent::BestBlockChanged(best_block) = event { - let hash = best_block.best_block_hash; - let _header = api - .rpc() - .chainhead_unstable_header(sub_id.clone(), hash) - .await? - .unwrap(); - } - - let event = blocks.next().await.unwrap()?; - if let FollowEvent::BestBlockChanged(best_block) = event { - let hash = best_block.best_block_hash; - let _header = api - .rpc() - .chainhead_unstable_header(sub_id.clone(), hash) - .await? - .unwrap(); - } - Ok(()) -} - // Make a dynamic constant query for `System::BlockLenght`. async fn dynamic_constant_query(api: &Client) -> Result<(), subxt::Error> { let constant_query = subxt::dynamic::constant("System", "BlockLength"); @@ -169,15 +135,6 @@ async fn dynamic_events(api: &Client) -> Result<(), subxt::Error> { Ok(()) } -// Make a few raw RPC calls to the chain. 
-async fn various_rpc_calls(api: &Client) -> Result<(), subxt::Error> { - let _system_chain = api.rpc().system_chain().await?; - let _system_name = api.rpc().system_name().await?; - let _finalized_hash = api.rpc().finalized_head().await?; - - Ok(()) -} - #[tokio::test] async fn light_client_testing() -> Result<(), subxt::Error> { let api: LightClient = LightClientBuilder::new() @@ -188,10 +145,8 @@ async fn light_client_testing() -> Result<(), subxt::Error> { finalized_headers_subscription(&api).await?; runtime_api_call(&api).await?; storage_plain_lookup(&api).await?; - follow_chain_head(&api).await?; dynamic_constant_query(&api).await?; dynamic_events(&api).await?; - various_rpc_calls(&api).await?; Ok(()) } diff --git a/testing/integration-tests/src/utils/node_proc.rs b/testing/integration-tests/src/utils/node_proc.rs index c834dd51fc..aa9bdacff2 100644 --- a/testing/integration-tests/src/utils/node_proc.rs +++ b/testing/integration-tests/src/utils/node_proc.rs @@ -4,7 +4,10 @@ use std::ffi::{OsStr, OsString}; use substrate_runner::SubstrateNode; -use subxt::{Config, OnlineClient}; +use subxt::{ + backend::{legacy, rpc}, + Config, OnlineClient, +}; #[cfg(feature = "unstable-light-client")] use subxt::client::{LightClient, LightClientBuilder}; @@ -12,7 +15,7 @@ use subxt::client::{LightClient, LightClientBuilder}; /// Spawn a local substrate node for testing subxt. pub struct TestNodeProcess { // Keep a handle to the node; once it's dropped the node is killed. - _proc: SubstrateNode, + proc: SubstrateNode, #[cfg(not(feature = "unstable-light-client"))] client: OnlineClient, @@ -33,6 +36,15 @@ where TestNodeProcessBuilder::new(paths) } + /// Hand back an RPC client connected to the test node. 
+ pub async fn legacy_rpc_methods(&self) -> legacy::LegacyRpcMethods { + let url = format!("ws://127.0.0.1:{}", self.proc.ws_port()); + let rpc_client = rpc::RpcClient::from_url(url) + .await + .expect("Unable to connect RPC client to test node"); + legacy::LegacyRpcMethods::new(rpc_client) + } + /// Returns the subxt client connected to the running node. #[cfg(not(feature = "unstable-light-client"))] pub fn client(&self) -> OnlineClient { @@ -101,10 +113,7 @@ impl TestNodeProcessBuilder { let client = OnlineClient::from_url(ws_url.clone()).await; match client { - Ok(client) => Ok(TestNodeProcess { - _proc: proc, - client, - }), + Ok(client) => Ok(TestNodeProcess { proc, client }), Err(err) => Err(format!("Failed to connect to node rpc at {ws_url}: {err}")), } } diff --git a/testing/integration-tests/src/utils/wait_for_blocks.rs b/testing/integration-tests/src/utils/wait_for_blocks.rs index 2934baee16..b21180a24a 100644 --- a/testing/integration-tests/src/utils/wait_for_blocks.rs +++ b/testing/integration-tests/src/utils/wait_for_blocks.rs @@ -6,14 +6,14 @@ use subxt::{client::OnlineClientT, Config}; /// Wait for blocks to be produced before running tests. Waiting for two blocks /// (the genesis block and another one) seems to be enough to allow tests -/// like `dry_run_passes` to work properly. +/// like `validation_passes` to work properly. /// /// If the "unstable-light-client" feature flag is enabled, this will wait for /// 5 blocks instead of two. The light client needs the extra blocks to avoid /// errors caused by loading information that is not available in the first 2 blocks /// (`Failed to load the block weight for block`). 
pub async fn wait_for_blocks(api: &impl OnlineClientT) { - let mut sub = api.rpc().subscribe_all_block_headers().await.unwrap(); + let mut sub = api.backend().stream_all_block_headers().await.unwrap(); sub.next().await; sub.next().await; diff --git a/testing/wasm-rpc-tests/tests/wasm.rs b/testing/wasm-rpc-tests/tests/wasm.rs index 4875e8ab24..67378b9900 100644 --- a/testing/wasm-rpc-tests/tests/wasm.rs +++ b/testing/wasm-rpc-tests/tests/wasm.rs @@ -29,6 +29,6 @@ async fn wasm_ws_transport_works() { .await .unwrap(); - let chain = client.rpc().system_chain().await.unwrap(); - assert_eq!(&chain, "Development"); + let mut stream = client.backend().stream_best_block_headers().await.unwrap(); + stream.next().await; }