diff --git a/Cargo.lock b/Cargo.lock index c6a41ff2f2..b4b87e30dc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1953,9 +1953,9 @@ dependencies = [ [[package]] name = "frame-decode" -version = "0.15.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fb3bfa2988ef40247e0e0eecfb171a01ad6f4e399485503aad4c413a5f236f3" +checksum = "9cb2b069fdf47c62526c6c7a64c5edba9c3c41b4bb11dac8e4fbf8e2857859a0" dependencies = [ "frame-metadata 23.0.0", "parity-scale-codec", diff --git a/Cargo.toml b/Cargo.toml index 93a19ffe27..9f9cee07e1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -82,7 +82,7 @@ darling = "0.20.10" derive-where = "1.2.7" either = { version = "1.13.0", default-features = false } finito = { version = "0.1.0", default-features = false } -frame-decode = { version = "0.15.0", default-features = false } +frame-decode = { version = "0.16.0", default-features = false } frame-metadata = { version = "23.0.0", default-features = false } futures = { version = "0.3.31", default-features = false, features = ["std"] } getrandom = { version = "0.2", default-features = false } diff --git a/metadata/src/lib.rs b/metadata/src/lib.rs index daadc37205..877755b285 100644 --- a/metadata/src/lib.rs +++ b/metadata/src/lib.rs @@ -192,6 +192,7 @@ impl frame_decode::storage::StorageTypeInfo for Metadata { .default_value .as_ref() .map(|def| Cow::Borrowed(&**def)), + use_old_v9_storage_hashers: false, }; Ok(info) diff --git a/new/src/backend.rs b/new/src/backend.rs new file mode 100644 index 0000000000..7ba400ec28 --- /dev/null +++ b/new/src/backend.rs @@ -0,0 +1,353 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This module exposes a backend trait for Subxt which allows us to get and set +//! the necessary information (probably from a JSON-RPC API, but that's up to the +//! implementation). 
+ +mod chain_head; +mod archive; +mod legacy; +mod combined; +mod utils; + +use crate::config::{Config, HashFor}; +use crate::error::BackendError; +use async_trait::async_trait; +use codec::{Decode, Encode}; +use futures::{Stream, StreamExt}; +use std::pin::Pin; +use std::sync::Arc; +use subxt_metadata::Metadata; + +/// Prevent the backend trait being implemented externally. +#[doc(hidden)] +pub(crate) mod sealed { + pub trait Sealed {} +} + +/// This trait exposes the interface that Subxt will use to communicate with +/// a backend. Its goal is to be as minimal as possible. +#[async_trait] +pub trait Backend: sealed::Sealed + Send + Sync + 'static { + /// Fetch values from storage. + async fn storage_fetch_values( + &self, + keys: Vec>, + at: HashFor, + ) -> Result, BackendError>; + + /// Fetch keys underneath the given key from storage. + async fn storage_fetch_descendant_keys( + &self, + key: Vec, + at: HashFor, + ) -> Result>, BackendError>; + + /// Fetch values underneath the given key from storage. + async fn storage_fetch_descendant_values( + &self, + key: Vec, + at: HashFor, + ) -> Result, BackendError>; + + /// Fetch the genesis hash + async fn genesis_hash(&self) -> Result, BackendError>; + + /// Get a block header + async fn block_header(&self, at: HashFor) -> Result, BackendError>; + + /// Return the extrinsics found in the block. Each extrinsic is represented + /// by a vector of bytes which has _not_ been SCALE decoded (in other words, the + /// first bytes in the vector will decode to the compact encoded length of the extrinsic) + async fn block_body(&self, at: HashFor) -> Result>>, BackendError>; + + /// Get the most recent finalized block hash. + /// Note: needed only in blocks client for finalized block stream; can prolly be removed. + async fn latest_finalized_block_ref(&self) -> Result>, BackendError>; + + /// A stream of all new block headers as they arrive. 
+ async fn stream_all_block_headers( + &self, + hasher: T::Hasher, + ) -> Result>)>, BackendError>; + + /// A stream of best block headers. + async fn stream_best_block_headers( + &self, + hasher: T::Hasher, + ) -> Result>)>, BackendError>; + + /// A stream of finalized block headers. + async fn stream_finalized_block_headers( + &self, + hasher: T::Hasher, + ) -> Result>)>, BackendError>; + + /// Submit a transaction. This will return a stream of events about it. + async fn submit_transaction( + &self, + bytes: &[u8], + ) -> Result>>, BackendError>; + + /// Make a call to some runtime API. + async fn call( + &self, + method: &str, + call_parameters: Option<&[u8]>, + at: HashFor, + ) -> Result, BackendError>; +} + +/// helpful utility methods derived from those provided on [`Backend`] +#[async_trait] +pub trait BackendExt: Backend { + /// Fetch a single value from storage. + async fn storage_fetch_value( + &self, + key: Vec, + at: HashFor, + ) -> Result>, BackendError> { + self.storage_fetch_values(vec![key], at) + .await? + .next() + .await + .transpose() + .map(|o| o.map(|s| s.value)) + } + + /// The same as a [`Backend::call()`], but it will also attempt to decode the + /// result into the given type, which is a fairly common operation. + async fn call_decoding( + &self, + method: &str, + call_parameters: Option<&[u8]>, + at: HashFor, + ) -> Result { + let bytes = self.call(method, call_parameters, at).await?; + let res = + D::decode(&mut &*bytes).map_err(BackendError::CouldNotScaleDecodeRuntimeResponse)?; + Ok(res) + } + + /// Return the metadata at some version. 
+ async fn metadata_at_version( + &self, + version: u32, + at: HashFor, + ) -> Result { + let param = version.encode(); + + let opaque: Option = self + .call_decoding("Metadata_metadata_at_version", Some(¶m), at) + .await?; + let Some(opaque) = opaque else { + return Err(BackendError::MetadataVersionNotFound(version)); + }; + + let metadata: Metadata = + Decode::decode(&mut &opaque.0[..]).map_err(BackendError::CouldNotDecodeMetadata)?; + Ok(metadata) + } + + /// Return V14 metadata from the legacy `Metadata_metadata` call. + async fn legacy_metadata(&self, at: HashFor) -> Result { + let opaque: frame_metadata::OpaqueMetadata = + self.call_decoding("Metadata_metadata", None, at).await?; + let metadata: Metadata = + Decode::decode(&mut &opaque.0[..]).map_err(BackendError::CouldNotDecodeMetadata)?; + Ok(metadata) + } +} + +#[async_trait] +impl + ?Sized, T: Config> BackendExt for B {} + +/// An opaque struct which, while alive, indicates that some references to a block +/// still exist. This gives the backend the opportunity to keep the corresponding block +/// details around for a while if it likes and is able to. No guarantees can be made about +/// how long the corresponding details might be available for, but if no references to a block +/// exist, then the backend is free to discard any details for it. +#[derive(Clone)] +pub struct BlockRef { + hash: H, + // We keep this around so that when it is dropped, it has the + // opportunity to tell the backend. + _pointer: Option>, +} + +impl From for BlockRef { + fn from(value: H) -> Self { + BlockRef::from_hash(value) + } +} + +impl PartialEq for BlockRef { + fn eq(&self, other: &Self) -> bool { + self.hash == other.hash + } +} +impl Eq for BlockRef {} + +// Manual implementation to work around https://github.com/mcarton/rust-derivative/issues/115. 
+impl PartialOrd for BlockRef { + fn partial_cmp(&self, other: &Self) -> Option { + self.hash.partial_cmp(&other.hash) + } +} + +impl Ord for BlockRef { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.hash.cmp(&other.hash) + } +} + +impl std::fmt::Debug for BlockRef { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple("BlockRef").field(&self.hash).finish() + } +} + +impl std::hash::Hash for BlockRef { + fn hash(&self, state: &mut Hasher) { + self.hash.hash(state); + } +} + +impl BlockRef { + /// A [`BlockRef`] that doesn't reference a given block, but does have an associated hash. + /// This is used in the legacy backend, which has no notion of pinning blocks. + pub fn from_hash(hash: H) -> Self { + Self { + hash, + _pointer: None, + } + } + /// Construct a [`BlockRef`] from an instance of the underlying trait. It's expected + /// that the [`Backend`] implementation will call this if it wants to track which blocks + /// are potentially in use. + pub fn new(hash: H, inner: P) -> Self { + Self { + hash, + _pointer: Some(Arc::new(inner)), + } + } + + /// Return the hash of the referenced block. + pub fn hash(&self) -> H + where + H: Copy, + { + self.hash + } +} + +/// A trait that a [`Backend`] can implement to know when some block +/// can be unpinned: when this is dropped, there are no remaining references +/// to the block that it's associated with. +pub trait BlockRefT: Send + Sync + 'static {} + +/// Runtime version information needed to submit transactions. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct RuntimeVersion { + /// Version of the runtime specification. A full-node will not attempt to use its native + /// runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`, + /// `spec_version` and `authoring_version` are the same between Wasm and native. + pub spec_version: u32, + /// All existing dispatches are fully compatible when this number doesn't change. 
If this + /// number changes, then `spec_version` must change, also. + /// + /// This number must change when an existing dispatchable (module ID, dispatch ID) is changed, + /// either through an alteration in its user-level semantics, a parameter + /// added/removed/changed, a dispatchable being removed, a module being removed, or a + /// dispatchable/module changing its index. + /// + /// It need *not* change when a new module is added or when a dispatchable is added. + pub transaction_version: u32, +} + +/// A stream of some item. +pub struct StreamOf(Pin + Send + 'static>>); + +impl Stream for StreamOf { + type Item = T; + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.0.poll_next_unpin(cx) + } +} + +impl std::fmt::Debug for StreamOf { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple("StreamOf").field(&"").finish() + } +} + +impl StreamOf { + /// Construct a new stream. + pub fn new(inner: Pin + Send + 'static>>) -> Self { + StreamOf(inner) + } + + /// Returns the next item in the stream. This is just a wrapper around + /// [`StreamExt::next()`] so that you can avoid the extra import. + pub async fn next(&mut self) -> Option { + StreamExt::next(self).await + } +} + +/// A stream of [`Result`]. +pub type StreamOfResults = StreamOf>; + +/// The status of the transaction. +/// +/// If the status is [`TransactionStatus::InFinalizedBlock`], [`TransactionStatus::Error`], +/// [`TransactionStatus::Invalid`] or [`TransactionStatus::Dropped`], then no future +/// events will be emitted. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum TransactionStatus { + /// Transaction is part of the future queue. + Validated, + /// The transaction has been broadcast to other nodes. + Broadcasted, + /// Transaction is no longer in a best block. + NoLongerInBestBlock, + /// Transaction has been included in block with given hash. 
+ InBestBlock { + /// Block hash the transaction is in. + hash: BlockRef, + }, + /// Transaction has been finalized by a finality-gadget, e.g GRANDPA + InFinalizedBlock { + /// Block hash the transaction is in. + hash: BlockRef, + }, + /// Something went wrong in the node. + Error { + /// Human readable message; what went wrong. + message: String, + }, + /// Transaction is invalid (bad nonce, signature etc). + Invalid { + /// Human readable message; why was it invalid. + message: String, + }, + /// The transaction was dropped. + Dropped { + /// Human readable message; why was it dropped. + message: String, + }, +} + +/// A response from calls like [`Backend::storage_fetch_values`] or +/// [`Backend::storage_fetch_descendant_values`]. +#[derive(serde::Serialize, serde::Deserialize, Clone, PartialEq, Debug)] +pub struct StorageResponse { + /// The key. + pub key: Vec, + /// The associated value. + pub value: Vec, +} diff --git a/new/src/backend/archive.rs b/new/src/backend/archive.rs new file mode 100644 index 0000000000..3a1cecb73d --- /dev/null +++ b/new/src/backend/archive.rs @@ -0,0 +1,212 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This module exposes a backend implementation based on the new APIs +//! described at . See +//! [`rpc_methods`] for the raw API calls. +//! +//! Specifically, the focus here is on the `archive` methods. These can only be used +//! to interact with archive nodes, but are less restrictive than the `chainHead` methods +//! in terms of the allowed operations. +//! +//! # Warning +//! +//! Everything in this module is **unstable**, meaning that it could change without +//! warning at any time. 
+ +mod storage_stream; + +use crate::backend::{ + Backend, BlockRef, StorageResponse, StreamOf, StreamOfResults, + TransactionStatus, utils::retry, +}; +use crate::config::{Config, HashFor, RpcConfigFor}; +use crate::error::BackendError; +use async_trait::async_trait; +use futures::StreamExt; +use subxt_rpcs::RpcClient; +use subxt_rpcs::methods::chain_head::{ + ArchiveStorageQuery, ArchiveCallResult, StorageQueryType, +}; +use storage_stream::ArchiveStorageStream; + +/// Re-export RPC types and methods from [`subxt_rpcs::methods::chain_head`]. +pub mod rpc_methods { + pub use subxt_rpcs::methods::chain_head::*; +} + +// Expose the RPC methods. +pub use subxt_rpcs::methods::chain_head::ChainHeadRpcMethods as ArchiveRpcMethods; + +/// The archive backend. +#[derive(Debug, Clone)] +pub struct ArchiveBackend { + // RPC methods we'll want to call: + methods: ArchiveRpcMethods>, +} + +impl ArchiveBackend { + /// Configure and construct an [`ArchiveBackend`] and the associated [`ChainHeadBackendDriver`]. 
+ pub fn new(client: impl Into,) -> ArchiveBackend { + let methods = ArchiveRpcMethods::new(client.into()); + + ArchiveBackend { methods } + } +} + +#[async_trait] +impl Backend for ArchiveBackend { + async fn storage_fetch_values( + &self, + keys: Vec>, + at: HashFor, + ) -> Result, BackendError> { + let queries = keys.into_iter() + .map(|key| ArchiveStorageQuery { + key: key, + query_type: StorageQueryType::Value, + pagination_start_key: None, + }) + .collect(); + + let stream = ArchiveStorageStream::new(at, self.methods.clone(), queries).map(|item| { + match item { + Err(e) => Some(Err(e)), + Ok(item) => item.value.map(|val| Ok(StorageResponse { key: item.key.0, value: val.0 })) + } + }).filter_map(async |item| item); + + Ok(StreamOf(Box::pin(stream))) + } + + async fn storage_fetch_descendant_keys( + &self, + key: Vec, + at: HashFor, + ) -> Result>, BackendError> { + let queries = std::iter::once(ArchiveStorageQuery { + key: key, + // Just ask for the hash and then ignore it and return keys + query_type: StorageQueryType::DescendantsHashes, + pagination_start_key: None, + }) + .collect(); + + let stream = ArchiveStorageStream::new(at, self.methods.clone(), queries).map(|item| { + match item { + Err(e) => Err(e), + Ok(item) => Ok(item.key.0) + } + }); + + Ok(StreamOf(Box::pin(stream))) + } + + async fn storage_fetch_descendant_values( + &self, + key: Vec, + at: HashFor, + ) -> Result, BackendError> { + let queries = std::iter::once(ArchiveStorageQuery { + key: key, + query_type: StorageQueryType::DescendantsValues, + pagination_start_key: None, + }) + .collect(); + + let stream = ArchiveStorageStream::new(at, self.methods.clone(), queries).map(|item| { + match item { + Err(e) => Some(Err(e)), + Ok(item) => item.value.map(|val| Ok(StorageResponse { key: item.key.0, value: val.0 })) + } + }).filter_map(async |item| item); + + Ok(StreamOf(Box::pin(stream))) + } + + async fn genesis_hash(&self) -> Result, BackendError> { + retry(|| async { + let hash = 
self.methods.archive_v1_genesis_hash().await?; + Ok(hash) + }) + .await + } + + async fn block_header(&self, at: HashFor) -> Result, BackendError> { + retry(|| async { + let header = self.methods.archive_v1_header(at).await?; + Ok(header) + }) + .await + } + + async fn block_body(&self, at: HashFor) -> Result>>, BackendError> { + retry(|| async { + let Some(exts) = self.methods.archive_v1_body(at).await? else { + return Ok(None); + }; + Ok(Some( + exts.into_iter().map(|ext| ext.0).collect() + )) + }) + .await + } + + async fn latest_finalized_block_ref(&self) -> Result>, BackendError> { + retry(|| async { + let height = self.methods.archive_v1_finalized_height().await?; + let mut hashes = self.methods.archive_v1_hash_by_height(height).await?; + let Some(hash) = hashes.pop() else { + return Err(BackendError::Other("Multiple hashes not expected at a finalized height".into())) + }; + Ok(BlockRef::from_hash(hash)) + }) + .await + } + + async fn stream_all_block_headers( + &self, + _hasher: T::Hasher, + ) -> Result>)>, BackendError> { + Err(BackendError::Other("The archive backend cannot stream block headers".into())) + } + + async fn stream_best_block_headers( + &self, + _hasher: T::Hasher, + ) -> Result>)>, BackendError> { + Err(BackendError::Other("The archive backend cannot stream block headers".into())) + } + + async fn stream_finalized_block_headers( + &self, + _hasher: T::Hasher, + ) -> Result>)>, BackendError> { + Err(BackendError::Other("The archive backend cannot stream block headers".into())) + } + + async fn submit_transaction( + &self, + extrinsic: &[u8], + ) -> Result>>, BackendError> { + // This chainHead impl does not use chainHead_follow and so is suitable here too. 
+ super::chain_head::submit_transaction_ignoring_follow_events(extrinsic, &self.methods).await + } + + async fn call( + &self, + method: &str, + call_parameters: Option<&[u8]>, + at: HashFor, + ) -> Result, BackendError> { + let res = self.methods.archive_v1_call(at, method, call_parameters.unwrap_or(&[])).await?; + match res { + ArchiveCallResult::Success(bytes) => Ok(bytes.0), + ArchiveCallResult::Error(e) => Err(BackendError::Other(e)), + } + } +} + + impl crate::backend::sealed::Sealed for ArchiveBackend {} + diff --git a/new/src/backend/archive/storage_stream.rs b/new/src/backend/archive/storage_stream.rs new file mode 100644 index 0000000000..7839402abe --- /dev/null +++ b/new/src/backend/archive/storage_stream.rs @@ -0,0 +1,178 @@ +use std::collections::VecDeque; +use subxt_rpcs::Error as RpcError; +use subxt_rpcs::methods::chain_head::{ArchiveStorageQuery, ArchiveStorageSubscription, ArchiveStorageEvent, ArchiveStorageEventItem}; +use std::pin::Pin; +use std::future::Future; +use futures::{FutureExt, Stream, StreamExt}; +use std::task::{Context, Poll}; +use crate::error::BackendError; +use crate::config::{Config, HashFor, RpcConfigFor}; +use super::ArchiveRpcMethods; + +pub struct ArchiveStorageStream { + at: HashFor, + methods: ArchiveRpcMethods>, + query_queue: VecDeque>>, + state: Option>, +} + +enum StreamState { + GetSubscription { + current_query: ArchiveStorageQuery>, + sub_fut: Pin>, RpcError>> + Send + 'static>> + }, + RunSubscription { + current_query: ArchiveStorageQuery>, + sub: ArchiveStorageSubscription> + }, +} + +impl ArchiveStorageStream { + /// Fetch descendant keys. 
+ pub fn new( + at: HashFor, + methods: ArchiveRpcMethods>, + query_queue: VecDeque>>, + ) -> Self { + Self { + at, + methods, + query_queue, + state: None, + } + } +} + +impl std::marker::Unpin for ArchiveStorageStream {} + +impl Stream for ArchiveStorageStream { + type Item = Result>, BackendError>; + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut this = self.as_mut(); + + loop { + match this.state.take() { + // No state yet so initialise! + None => { + // Nothing left; we're done. + let Some(query) = this.query_queue.pop_front() else { + return Poll::Ready(None); + }; + + let at = this.at; + let methods = this.methods.clone(); + let current_query = query.clone(); + let sub_fut = async move { + let query = std::iter::once(ArchiveStorageQuery { + key: query.key.as_ref(), + query_type: query.query_type, + pagination_start_key: query.pagination_start_key.as_deref(), + }); + + methods.archive_v1_storage( + at, + query, + None + ).await + }; + + this.state = Some(StreamState::GetSubscription { + current_query, + sub_fut: Box::pin(sub_fut) + }); + }, + // We're getting our subscription stream for the current query. + Some(StreamState::GetSubscription { current_query, mut sub_fut }) => { + match sub_fut.poll_unpin(cx) { + Poll::Ready(Ok(sub)) => { + this.state = Some(StreamState::RunSubscription { + current_query, + sub + }); + }, + Poll::Ready(Err(e)) => { + if e.is_disconnected_will_reconnect() { + // Push the query back onto the queue to try again + this.query_queue.push_front(current_query); + continue; + } + + this.state = None; + return Poll::Ready(Some(Err(e.into()))) + } + Poll::Pending => { + this.state = Some(StreamState::GetSubscription { + current_query, + sub_fut + }); + return Poll::Pending + }, + } + }, + // Running the subscription and returning results. 
+ Some(StreamState::RunSubscription { current_query, mut sub }) => { + match sub.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(val))) => { + match val { + ArchiveStorageEvent::Item(item) => { + this.state = Some(StreamState::RunSubscription { + current_query: ArchiveStorageQuery { + key: current_query.key, + query_type: current_query.query_type, + // In the event of error, we resume from the last seen value. + // At the time of writing, it's not clear if paginationStartKey + // starts from the key itself or the first key after it: + // https://github.com/paritytech/json-rpc-interface-spec/issues/176 + pagination_start_key: Some(item.key.0.clone()) + }, + sub + }); + + // We treat `paginationStartKey` as being the key we want results to begin _after_. + // So, if we see a value that's <= it, ignore the value. + let ignore_this_value = current_query + .pagination_start_key + .as_ref() + .is_some_and(|k| item.key.0.cmp(k).is_le()); + + if ignore_this_value { + continue; + } + + return Poll::Ready(Some(Ok(item))); + }, + ArchiveStorageEvent::Error(e) => { + this.state = None; + return Poll::Ready(Some(Err(BackendError::Other(e.error)))) + }, + ArchiveStorageEvent::Done => { + this.state = None; + continue; + }, + } + }, + Poll::Ready(Some(Err(e))) => { + if e.is_disconnected_will_reconnect() { + // Put the current query back into the queue and retry. + // We've been keeping it uptodate as needed. + this.query_queue.push_front(current_query); + this.state = None; + continue; + } + + this.state = None; + return Poll::Ready(Some(Err(e.into()))); + } + Poll::Ready(None) => { + this.state = None; + continue; + } + Poll::Pending => { + return Poll::Pending + }, + } + }, + } + } + } +} diff --git a/new/src/backend/chain_head.rs b/new/src/backend/chain_head.rs new file mode 100644 index 0000000000..8b2eba8481 --- /dev/null +++ b/new/src/backend/chain_head.rs @@ -0,0 +1,789 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. 
+// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This module exposes a backend implementation based on the new APIs +//! described at . See +//! [`rpc_methods`] for the raw API calls. +//! +//! Specifically, the focus here is on the `chainHead` methods. +//! +//! # Warning +//! +//! Everything in this module is **unstable**, meaning that it could change without +//! warning at any time. + +mod follow_stream; +mod follow_stream_driver; +mod follow_stream_unpin; +mod storage_items; + +use self::follow_stream_driver::FollowStreamFinalizedHeads; +use crate::backend::{ + Backend, BlockRef, BlockRefT, StorageResponse, StreamOf, StreamOfResults, + TransactionStatus, utils::retry, +}; +use crate::config::{Config, Hash, HashFor, RpcConfigFor}; +use crate::error::{BackendError, RpcError}; +use async_trait::async_trait; +use follow_stream_driver::{FollowStreamDriver, FollowStreamDriverHandle}; +use futures::future::Either; +use futures::{Stream, StreamExt}; +use std::collections::HashMap; +use std::task::Poll; +use storage_items::StorageItems; +use subxt_rpcs::RpcClient; +use subxt_rpcs::methods::chain_head::{ + FollowEvent, MethodResponse, StorageQuery, StorageQueryType, StorageResultType, +}; + +/// Re-export RPC types and methods from [`subxt_rpcs::methods::chain_head`]. +pub mod rpc_methods { + pub use subxt_rpcs::methods::chain_head::*; +} + +// Expose the RPC methods. +pub use subxt_rpcs::methods::chain_head::ChainHeadRpcMethods; + +/// Configure and build an [`ChainHeadBackend`]. +pub struct ChainHeadBackendBuilder { + max_block_life: usize, + transaction_timeout_secs: usize, + submit_transactions_ignoring_follow_events: bool, + _marker: std::marker::PhantomData, +} + +impl Default for ChainHeadBackendBuilder { + fn default() -> Self { + Self::new() + } +} + +impl ChainHeadBackendBuilder { + /// Create a new [`ChainHeadBackendBuilder`]. 
+ pub fn new() -> Self { + Self { + max_block_life: usize::MAX, + transaction_timeout_secs: 240, + submit_transactions_ignoring_follow_events: false, + _marker: std::marker::PhantomData, + } + } + + /// The age of a block is defined here as the difference between the current finalized block number + /// and the block number of a given block. Once the difference equals or exceeds the number given + /// here, the block is unpinned. + /// + /// By default, we will never automatically unpin blocks, but if the number of pinned blocks that we + /// keep hold of exceeds the number that the server can tolerate, then a `stop` event is generated and + /// we are forced to resubscribe, losing any pinned blocks. + pub fn max_block_life(mut self, max_block_life: usize) -> Self { + self.max_block_life = max_block_life; + self + } + + /// When a transaction is submitted, we wait for events indicating it's successfully made it into a finalized + /// block. If it takes too long for this to happen, we assume that something went wrong and that we should + /// give up waiting. + /// + /// Provide a value here to denote how long, in seconds, to wait before giving up. Defaults to 240 seconds. + /// + /// If [`Self::submit_transactions_ignoring_follow_events()`] is called, this timeout is ignored. + pub fn transaction_timeout(mut self, timeout_secs: usize) -> Self { + self.transaction_timeout_secs = timeout_secs; + self + } + + /// When a transaction is submitted, we normally synchronize the events that we get back with events from + /// our background `chainHead_follow` subscription, to ensure that any blocks hashes that we see can be + /// immediately queried (for example to get events or state at that block), and are kept around unless they + /// are no longer needed. 
+ /// + /// The main downside of this synchronization is that there may be a delay in being handed back a + /// [`TransactionStatus::InFinalizedBlock`] event while we wait to see the same block hash emitted from + /// our background `chainHead_follow` subscription in order to ensure it's available for querying. + /// + /// Calling this method turns off this synchronization, speeding up the response and removing any reliance + /// on the `chainHead_follow` subscription continuing to run without stopping throughout submitting a transaction. + /// + /// # Warning + /// + /// This can lead to errors when calling APIs like `wait_for_finalized_success`, which will try to retrieve events + /// at the finalized block, because there will be a race and the finalized block may not be available for querying + /// yet. + pub fn submit_transactions_ignoring_follow_events(mut self) -> Self { + self.submit_transactions_ignoring_follow_events = true; + self + } + + /// A low-level API to build the backend and driver which requires polling the driver for the backend + /// to make progress. + /// + /// This is useful if you want to manage the driver yourself, for example if you want to run it in on + /// a specific runtime. + /// + /// If you just want to run the driver in the background until completion in on the default runtime, + /// use [`ChainHeadBackendBuilder::build_with_background_driver`] instead. 
+ pub fn build( + self, + client: impl Into, + ) -> (ChainHeadBackend, ChainHeadBackendDriver) { + // Construct the underlying follow_stream layers: + let rpc_methods = ChainHeadRpcMethods::new(client.into()); + let follow_stream = + follow_stream::FollowStream::>::from_methods(rpc_methods.clone()); + let follow_stream_unpin = + follow_stream_unpin::FollowStreamUnpin::>::from_methods( + follow_stream, + rpc_methods.clone(), + self.max_block_life, + ); + let follow_stream_driver = FollowStreamDriver::new(follow_stream_unpin); + + // Wrap these into the backend and driver that we'll expose. + let backend = ChainHeadBackend { + methods: rpc_methods, + follow_handle: follow_stream_driver.handle(), + transaction_timeout_secs: self.transaction_timeout_secs, + submit_transactions_ignoring_follow_events: self + .submit_transactions_ignoring_follow_events, + }; + let driver = ChainHeadBackendDriver { + driver: follow_stream_driver, + }; + + (backend, driver) + } + + /// An API to build the backend and driver which will run in the background until completion + /// on the default runtime. + /// + /// - On non-wasm targets, this will spawn the driver on `tokio`. + /// - On wasm targets, this will spawn the driver on `wasm-bindgen-futures`. + #[cfg(feature = "runtime")] + pub fn build_with_background_driver(self, client: impl Into) -> ChainHeadBackend { + fn spawn(future: F) { + #[cfg(not(target_family = "wasm"))] + tokio::spawn(async move { + future.await; + }); + #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] + wasm_bindgen_futures::spawn_local(async move { + future.await; + }); + } + + let (backend, mut driver) = self.build(client); + spawn(async move { + // NOTE: we need to poll the driver until it's done i.e returns None + // to ensure that the backend is shutdown properly. 
+ while let Some(res) = driver.next().await { + if let Err(err) = res { + tracing::debug!(target: "subxt", "chainHead backend error={err}"); + } + } + + tracing::debug!(target: "subxt", "chainHead backend was closed"); + }); + + backend + } +} + +/// Driver for the [`ChainHeadBackend`]. This must be polled in order for the +/// backend to make progress. +#[derive(Debug)] +pub struct ChainHeadBackendDriver { + driver: FollowStreamDriver>, +} + +impl Stream for ChainHeadBackendDriver { + type Item = > as Stream>::Item; + fn poll_next( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.driver.poll_next_unpin(cx) + } +} + +/// The chainHead backend. +#[derive(Debug, Clone)] +pub struct ChainHeadBackend { + // RPC methods we'll want to call: + methods: ChainHeadRpcMethods>, + // A handle to the chainHead_follow subscription: + follow_handle: FollowStreamDriverHandle>, + // How long to wait until giving up on transactions: + transaction_timeout_secs: usize, + // Don't synchronise blocks with chainHead_follow when submitting txs: + submit_transactions_ignoring_follow_events: bool, +} + +impl ChainHeadBackend { + /// Configure and construct an [`ChainHeadBackend`] and the associated [`ChainHeadBackendDriver`]. 
+ pub fn builder() -> ChainHeadBackendBuilder { + ChainHeadBackendBuilder::new() + } + + /// Stream block headers based on the provided filter fn + async fn stream_headers( + &self, + f: F, + ) -> Result>)>, BackendError> + where + F: Fn( + FollowEvent>>, + ) -> Vec>> + + Send + + Sync + + 'static, + { + let methods = self.methods.clone(); + + let headers = + FollowStreamFinalizedHeads::new(self.follow_handle.subscribe(), f).flat_map(move |r| { + let methods = methods.clone(); + + let (sub_id, block_refs) = match r { + Ok(ev) => ev, + Err(e) => return Either::Left(futures::stream::once(async { Err(e) })), + }; + + Either::Right( + futures::stream::iter(block_refs).filter_map(move |block_ref| { + let methods = methods.clone(); + let sub_id = sub_id.clone(); + + async move { + let res = methods + .chainhead_v1_header(&sub_id, block_ref.hash()) + .await + .transpose()?; + + let header = match res { + Ok(header) => header, + Err(e) => return Some(Err(e.into())), + }; + + Some(Ok((header, block_ref.into()))) + } + }), + ) + }); + + Ok(StreamOf(Box::pin(headers))) + } +} + +impl BlockRefT for follow_stream_unpin::BlockRef {} +impl From> for BlockRef { + fn from(b: follow_stream_unpin::BlockRef) -> Self { + BlockRef::new(b.hash(), b) + } +} + +impl super::sealed::Sealed for ChainHeadBackend {} + +#[async_trait] +impl Backend for ChainHeadBackend { + async fn storage_fetch_values( + &self, + keys: Vec>, + at: HashFor, + ) -> Result, BackendError> { + retry(|| async { + let queries = keys.iter().map(|key| StorageQuery { + key: &**key, + query_type: StorageQueryType::Value, + }); + + let storage_items = + StorageItems::from_methods(queries, at, &self.follow_handle, self.methods.clone()) + .await?; + + let stream = storage_items.filter_map(async |val| { + let val = match val { + Ok(val) => val, + Err(e) => return Some(Err(e)), + }; + + let StorageResultType::Value(result) = val.result else { + return None; + }; + Some(Ok(StorageResponse { + key: val.key.0, + value: result.0, 
+ })) + }); + + Ok(StreamOf(Box::pin(stream))) + }) + .await + } + + async fn storage_fetch_descendant_keys( + &self, + key: Vec, + at: HashFor, + ) -> Result>, BackendError> { + retry(|| async { + // Ask for hashes, and then just ignore them and return the keys that come back. + let query = StorageQuery { + key: &*key, + query_type: StorageQueryType::DescendantsHashes, + }; + + let storage_items = StorageItems::from_methods( + std::iter::once(query), + at, + &self.follow_handle, + self.methods.clone(), + ) + .await?; + + let storage_result_stream = storage_items.map(|val| val.map(|v| v.key.0)); + Ok(StreamOf(Box::pin(storage_result_stream))) + }) + .await + } + + async fn storage_fetch_descendant_values( + &self, + key: Vec, + at: HashFor, + ) -> Result, BackendError> { + retry(|| async { + let query = StorageQuery { + key: &*key, + query_type: StorageQueryType::DescendantsValues, + }; + + let storage_items = StorageItems::from_methods( + std::iter::once(query), + at, + &self.follow_handle, + self.methods.clone(), + ) + .await?; + + let storage_result_stream = storage_items.filter_map(async |val| { + let val = match val { + Ok(val) => val, + Err(e) => return Some(Err(e)), + }; + + let StorageResultType::Value(result) = val.result else { + return None; + }; + Some(Ok(StorageResponse { + key: val.key.0, + value: result.0, + })) + }); + + Ok(StreamOf(Box::pin(storage_result_stream))) + }) + .await + } + + async fn genesis_hash(&self) -> Result, BackendError> { + retry(|| async { + let genesis_hash = self.methods.chainspec_v1_genesis_hash().await?; + Ok(genesis_hash) + }) + .await + } + + async fn block_header(&self, at: HashFor) -> Result, BackendError> { + retry(|| async { + let sub_id = get_subscription_id(&self.follow_handle).await?; + let header = self.methods.chainhead_v1_header(&sub_id, at).await?; + Ok(header) + }) + .await + } + + async fn block_body(&self, at: HashFor) -> Result>>, BackendError> { + retry(|| async { + let sub_id = 
get_subscription_id(&self.follow_handle).await?; + + // Subscribe to the body response and get our operationId back. + let follow_events = self.follow_handle.subscribe().events(); + let status = self.methods.chainhead_v1_body(&sub_id, at).await?; + let operation_id = match status { + MethodResponse::LimitReached => return Err(RpcError::LimitReached.into()), + MethodResponse::Started(s) => s.operation_id, + }; + + // Wait for the response to come back with the correct operationId. + let mut exts_stream = follow_events.filter_map(|ev| { + let FollowEvent::OperationBodyDone(body) = ev else { + return std::future::ready(None); + }; + if body.operation_id != operation_id { + return std::future::ready(None); + } + let exts: Vec<_> = body.value.into_iter().map(|ext| ext.0).collect(); + std::future::ready(Some(exts)) + }); + + Ok(exts_stream.next().await) + }) + .await + } + + async fn latest_finalized_block_ref(&self) -> Result>, BackendError> { + let next_ref: Option>> = self + .follow_handle + .subscribe() + .events() + .filter_map(|ev| { + let out = match ev { + FollowEvent::Initialized(init) => { + init.finalized_block_hashes.last().map(|b| b.clone().into()) + } + _ => None, + }; + std::future::ready(out) + }) + .next() + .await; + + next_ref.ok_or_else(|| RpcError::SubscriptionDropped.into()) + } + + async fn stream_all_block_headers( + &self, + _hasher: T::Hasher, + ) -> Result>)>, BackendError> { + // TODO: https://github.com/paritytech/subxt/issues/1568 + // + // It's possible that blocks may be silently missed if + // a reconnection occurs because it's restarted by the unstable backend. 
+ self.stream_headers(|ev| match ev { + FollowEvent::Initialized(init) => init.finalized_block_hashes, + FollowEvent::NewBlock(ev) => { + vec![ev.block_hash] + } + _ => vec![], + }) + .await + } + + async fn stream_best_block_headers( + &self, + _hasher: T::Hasher, + ) -> Result>)>, BackendError> { + // TODO: https://github.com/paritytech/subxt/issues/1568 + // + // It's possible that blocks may be silently missed if + // a reconnection occurs because it's restarted by the unstable backend. + self.stream_headers(|ev| match ev { + FollowEvent::Initialized(init) => init.finalized_block_hashes, + FollowEvent::BestBlockChanged(ev) => vec![ev.best_block_hash], + _ => vec![], + }) + .await + } + + async fn stream_finalized_block_headers( + &self, + _hasher: T::Hasher, + ) -> Result>)>, BackendError> { + self.stream_headers(|ev| match ev { + FollowEvent::Initialized(init) => init.finalized_block_hashes, + FollowEvent::Finalized(ev) => ev.finalized_block_hashes, + _ => vec![], + }) + .await + } + + async fn submit_transaction( + &self, + extrinsic: &[u8], + ) -> Result>>, BackendError> { + if self.submit_transactions_ignoring_follow_events { + submit_transaction_ignoring_follow_events(extrinsic, &self.methods).await + } else { + submit_transaction_tracking_follow_events::( + extrinsic, + self.transaction_timeout_secs as u64, + &self.methods, + &self.follow_handle, + ) + .await + } + } + + async fn call( + &self, + method: &str, + call_parameters: Option<&[u8]>, + at: HashFor, + ) -> Result, BackendError> { + retry(|| async { + let sub_id = get_subscription_id(&self.follow_handle).await?; + + // Subscribe to the body response and get our operationId back. 
+ let follow_events = self.follow_handle.subscribe().events(); + let call_parameters = call_parameters.unwrap_or(&[]); + let status = self + .methods + .chainhead_v1_call(&sub_id, at, method, call_parameters) + .await?; + let operation_id = match status { + MethodResponse::LimitReached => return Err(RpcError::LimitReached.into()), + MethodResponse::Started(s) => s.operation_id, + }; + + // Wait for the response to come back with the correct operationId. + let mut call_data_stream = follow_events.filter_map(|ev| { + let FollowEvent::OperationCallDone(body) = ev else { + return std::future::ready(None); + }; + if body.operation_id != operation_id { + return std::future::ready(None); + } + std::future::ready(Some(body.output.0)) + }); + + call_data_stream + .next() + .await + .ok_or_else(|| RpcError::SubscriptionDropped.into()) + }) + .await + } +} + +/// A helper to obtain a subscription ID. +async fn get_subscription_id( + follow_handle: &FollowStreamDriverHandle, +) -> Result { + let Some(sub_id) = follow_handle.subscribe().subscription_id().await else { + return Err(RpcError::SubscriptionDropped.into()); + }; + + Ok(sub_id) +} + +// Submit a transaction. This makes no attempt to sync with follow events, +// This is used in the archive backend too. +pub(crate) async fn submit_transaction_ignoring_follow_events( + extrinsic: &[u8], + methods: &ChainHeadRpcMethods>, +) -> Result>>, BackendError> { + let tx_progress = methods + .transactionwatch_v1_submit_and_watch(extrinsic) + .await? 
+ .map(|ev| { + ev.map(|tx_status| { + use subxt_rpcs::methods::chain_head::TransactionStatus as RpcTransactionStatus; + match tx_status { + RpcTransactionStatus::Validated => TransactionStatus::Validated, + RpcTransactionStatus::Broadcasted => TransactionStatus::Broadcasted, + RpcTransactionStatus::BestChainBlockIncluded { block: None } => { + TransactionStatus::NoLongerInBestBlock + }, + RpcTransactionStatus::BestChainBlockIncluded { block: Some(block) } => { + TransactionStatus::InBestBlock { hash: BlockRef::from_hash(block.hash) } + }, + RpcTransactionStatus::Finalized { block } => { + TransactionStatus::InFinalizedBlock { hash: BlockRef::from_hash(block.hash) } + }, + RpcTransactionStatus::Error { error } => { + TransactionStatus::Error { message: error } + }, + RpcTransactionStatus::Invalid { error } => { + TransactionStatus::Invalid { message: error } + }, + RpcTransactionStatus::Dropped { error } => { + TransactionStatus::Dropped { message: error } + }, + } + }).map_err(Into::into) + }); + + Ok(StreamOf(Box::pin(tx_progress))) +} + +// Submit a transaction. This synchronizes with chainHead_follow events to ensure +// that block hashes returned are ready to be queried. +async fn submit_transaction_tracking_follow_events( + extrinsic: &[u8], + transaction_timeout_secs: u64, + methods: &ChainHeadRpcMethods>, + follow_handle: &FollowStreamDriverHandle>, +) -> Result>>, BackendError> { + // We care about new and finalized block hashes. + enum SeenBlockMarker { + New, + Finalized, + } + + // First, subscribe to new blocks. + let mut seen_blocks_sub = follow_handle.subscribe().events(); + + // Then, submit the transaction. 
+ let mut tx_progress = methods
+ .transactionwatch_v1_submit_and_watch(extrinsic)
+ .await?;
+
+ let mut seen_blocks = HashMap::new();
+ let mut done = false;
+
+ // If we see the finalized event, we start waiting until we find a finalized block that
+ // matches, so we can guarantee to return a pinned block hash and be properly in sync
+ // with chainHead_follow.
+ let mut finalized_hash: Option> = None;
+
+ // Record the start time so that we can time out if things appear to take too long.
+ let start_instant = web_time::Instant::now();
+
+ // A quick helper to return a generic error.
+ let err_other = |s: &str| Some(Err(BackendError::Other(s.into())));
+
+ // Now we can attempt to associate tx events with pinned blocks.
+ let tx_stream = futures::stream::poll_fn(move |cx| {
+ loop {
+ // Bail early if we're finished; nothing else to do.
+ if done {
+ return Poll::Ready(None);
+ }
+
+ // Bail if we exceed the configured transaction timeout; something very likely went wrong.
+ if start_instant.elapsed().as_secs() > transaction_timeout_secs {
+ return Poll::Ready(err_other(
+ "Timeout waiting for the transaction to be finalized",
+ ));
+ }
+
+ // Poll for a follow event, and error if the stream has unexpectedly ended.
+ let follow_ev_poll = match seen_blocks_sub.poll_next_unpin(cx) {
+ Poll::Ready(None) => {
+ return Poll::Ready(err_other(
+ "chainHead_follow stream ended unexpectedly",
+ ));
+ }
+ Poll::Ready(Some(follow_ev)) => Poll::Ready(follow_ev),
+ Poll::Pending => Poll::Pending,
+ };
+ let follow_ev_is_pending = follow_ev_poll.is_pending();
+
+ // If there was a follow event, then handle it and loop around to see if there are more.
+ // We want to buffer follow events until we hit Pending, so that we are as up-to-date as possible
+ // for when we see a BestBlockChanged event, so that we have the best chance of already having
+ // seen the block that it mentions and returning a proper pinned block. 
+ if let Poll::Ready(follow_ev) = follow_ev_poll { + match follow_ev { + FollowEvent::NewBlock(ev) => { + // Optimization: once we have a `finalized_hash`, we only care about finalized + // block refs now and can avoid bothering to save new blocks. + if finalized_hash.is_none() { + seen_blocks.insert( + ev.block_hash.hash(), + (SeenBlockMarker::New, ev.block_hash), + ); + } + } + FollowEvent::Finalized(ev) => { + for block_ref in ev.finalized_block_hashes { + seen_blocks.insert( + block_ref.hash(), + (SeenBlockMarker::Finalized, block_ref), + ); + } + } + FollowEvent::Stop => { + // If we get this event, we'll lose all of our existing pinned blocks and have a gap + // in which we may lose the finalized block that the TX is in. For now, just error if + // this happens, to prevent the case in which we never see a finalized block and wait + // forever. + return Poll::Ready(err_other( + "chainHead_follow emitted 'stop' event during transaction submission", + )); + } + _ => {} + } + continue; + } + + // If we have a finalized hash, we are done looking for tx events and we are just waiting + // for a pinned block with a matching hash (which must appear eventually given it's finalized). + if let Some(hash) = &finalized_hash { + if let Some((SeenBlockMarker::Finalized, block_ref)) = + seen_blocks.remove(hash) + { + // Found it! Hand back the event with a pinned block. We're done. + done = true; + let ev = TransactionStatus::InFinalizedBlock { + hash: block_ref.into(), + }; + return Poll::Ready(Some(Ok(ev))); + } else { + // Not found it! If follow ev is pending, then return pending here and wait for + // a new one to come in, else loop around and see if we get another one immediately. + seen_blocks.clear(); + if follow_ev_is_pending { + return Poll::Pending; + } else { + continue; + } + } + } + + // If we don't have a finalized block yet, we keep polling for tx progress events. 
+ let tx_progress_ev = match tx_progress.poll_next_unpin(cx) { + Poll::Pending => return Poll::Pending, + Poll::Ready(None) => { + return Poll::Ready(err_other( + "No more transaction progress events, but we haven't seen a Finalized one yet", + )); + } + Poll::Ready(Some(Err(e))) => return Poll::Ready(Some(Err(e.into()))), + Poll::Ready(Some(Ok(ev))) => ev, + }; + + // When we get one, map it to the correct format (or for finalized ev, wait for the pinned block): + use subxt_rpcs::methods::chain_head::TransactionStatus as RpcTransactionStatus; + let tx_progress_ev = match tx_progress_ev { + RpcTransactionStatus::Finalized { block } => { + // We'll wait until we have seen this hash, to try to guarantee + // that when we return this event, the corresponding block is + // pinned and accessible. + finalized_hash = Some(block.hash); + continue; + } + RpcTransactionStatus::BestChainBlockIncluded { block: Some(block) } => { + // Look up a pinned block ref if we can, else return a non-pinned + // block that likely isn't accessible. We have no guarantee that a best + // block on the node a tx was sent to will ever be known about on the + // chainHead_follow subscription. + let block_ref = match seen_blocks.get(&block.hash) { + Some((_, block_ref)) => block_ref.clone().into(), + None => BlockRef::from_hash(block.hash), + }; + TransactionStatus::InBestBlock { hash: block_ref } + } + RpcTransactionStatus::BestChainBlockIncluded { block: None } => { + TransactionStatus::NoLongerInBestBlock + } + RpcTransactionStatus::Broadcasted => TransactionStatus::Broadcasted, + RpcTransactionStatus::Dropped { error, .. 
} => { + TransactionStatus::Dropped { message: error } + } + RpcTransactionStatus::Error { error } => { + TransactionStatus::Error { message: error } + } + RpcTransactionStatus::Invalid { error } => { + TransactionStatus::Invalid { message: error } + } + RpcTransactionStatus::Validated => TransactionStatus::Validated, + }; + return Poll::Ready(Some(Ok(tx_progress_ev))); + } + }); + + Ok(StreamOf(Box::pin(tx_stream))) +} \ No newline at end of file diff --git a/new/src/backend/chain_head/follow_stream.rs b/new/src/backend/chain_head/follow_stream.rs new file mode 100644 index 0000000000..958e923477 --- /dev/null +++ b/new/src/backend/chain_head/follow_stream.rs @@ -0,0 +1,336 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use crate::config::{Config, HashFor, RpcConfigFor}; +use crate::error::BackendError; +use futures::{FutureExt, Stream, StreamExt, TryStreamExt}; +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; +use subxt_rpcs::methods::chain_head::{ChainHeadRpcMethods, FollowEvent}; + +/// A `Stream` whose goal is to remain subscribed to `chainHead_follow`. It will re-subscribe if the subscription +/// is ended for any reason, and it will return the current `subscription_id` as an event, along with the other +/// follow events. +pub struct FollowStream { + // Using this and not just keeping a copy of the RPC methods + // around means that we can test this in isolation with dummy streams. + stream_getter: FollowEventStreamGetter, + stream: InnerStreamState, +} + +impl std::fmt::Debug for FollowStream { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("FollowStream") + .field("stream_getter", &"..") + .field("stream", &self.stream) + .finish() + } +} + +/// A getter function that returns an [`FollowEventStreamFut`]. 
+pub type FollowEventStreamGetter = Box FollowEventStreamFut + Send>; + +/// The future which will return a stream of follow events and the subscription ID for it. +pub type FollowEventStreamFut = Pin< + Box< + dyn Future, String), BackendError>> + + Send + + 'static, + >, +>; + +/// The stream of follow events. +pub type FollowEventStream = + Pin, BackendError>> + Send + 'static>>; + +/// Either a ready message with the current subscription ID, or +/// an event from the stream itself. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum FollowStreamMsg { + /// The stream is ready (and has a subscription ID) + Ready(String), + /// An event from the stream. + Event(FollowEvent), +} + +impl FollowStreamMsg { + /// Return an event, or none if the message is a "ready" one. + pub fn into_event(self) -> Option> { + match self { + FollowStreamMsg::Ready(_) => None, + FollowStreamMsg::Event(e) => Some(e), + } + } +} + +enum InnerStreamState { + /// We've just created the stream; we'll start Initializing it + New, + /// We're fetching the inner subscription. Move to Ready when we have one. + Initializing(FollowEventStreamFut), + /// Report back the subscription ID here, and then start ReceivingEvents. + Ready(Option<(FollowEventStream, String)>), + /// We are polling for, and receiving events from the stream. + ReceivingEvents(FollowEventStream), + /// We received a stop event. We'll send one on and restart the stream. + Stopped, + /// The stream is finished and will not restart (likely due to an error). 
+ Finished, +} + +impl std::fmt::Debug for InnerStreamState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::New => write!(f, "New"), + Self::Initializing(_) => write!(f, "Initializing(..)"), + Self::Ready(_) => write!(f, "Ready(..)"), + Self::ReceivingEvents(_) => write!(f, "ReceivingEvents(..)"), + Self::Stopped => write!(f, "Stopped"), + Self::Finished => write!(f, "Finished"), + } + } +} + +impl FollowStream { + /// Create a new [`FollowStream`] given a function which returns the stream. + pub fn new(stream_getter: FollowEventStreamGetter) -> Self { + Self { + stream_getter, + stream: InnerStreamState::New, + } + } + + /// Create a new [`FollowStream`] given the RPC methods. + pub fn from_methods(methods: ChainHeadRpcMethods>) -> FollowStream> { + FollowStream { + stream_getter: Box::new(move || { + let methods = methods.clone(); + Box::pin(async move { + // Make the RPC call: + let stream = methods.chainhead_v1_follow(true).await?; + // Extract the subscription ID: + let Some(sub_id) = stream.subscription_id().map(ToOwned::to_owned) else { + return Err(BackendError::Other( + "Subscription ID expected for chainHead_follow response, but not given" + .to_owned(), + )); + }; + // Map stream errors into the higher level subxt one: + let stream = stream.map_err(|e| e.into()); + let stream: FollowEventStream> = Box::pin(stream); + // Return both: + Ok((stream, sub_id)) + }) + }), + stream: InnerStreamState::New, + } + } +} + +impl std::marker::Unpin for FollowStream {} + +impl Stream for FollowStream { + type Item = Result, BackendError>; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.get_mut(); + + loop { + match &mut this.stream { + InnerStreamState::New => { + let fut = (this.stream_getter)(); + this.stream = InnerStreamState::Initializing(fut); + continue; + } + InnerStreamState::Initializing(fut) => { + match fut.poll_unpin(cx) { + Poll::Pending => { + return Poll::Pending; 
+ } + Poll::Ready(Ok(sub_with_id)) => { + this.stream = InnerStreamState::Ready(Some(sub_with_id)); + continue; + } + Poll::Ready(Err(e)) => { + // Re-start if a reconnecting backend was enabled. + if e.is_disconnected_will_reconnect() { + this.stream = InnerStreamState::Stopped; + continue; + } + + // Finish forever if there's an error, passing it on. + this.stream = InnerStreamState::Finished; + return Poll::Ready(Some(Err(e))); + } + } + } + InnerStreamState::Ready(stream) => { + // We never set the Option to `None`; we just have an Option so + // that we can take ownership of the contents easily here. + let (sub, sub_id) = stream.take().expect("should always be Some"); + this.stream = InnerStreamState::ReceivingEvents(sub); + return Poll::Ready(Some(Ok(FollowStreamMsg::Ready(sub_id)))); + } + InnerStreamState::ReceivingEvents(stream) => { + match stream.poll_next_unpin(cx) { + Poll::Pending => { + return Poll::Pending; + } + Poll::Ready(None) => { + // No error happened but the stream ended; restart and + // pass on a Stop message anyway. + this.stream = InnerStreamState::Stopped; + continue; + } + Poll::Ready(Some(Ok(ev))) => { + if let FollowEvent::Stop = ev { + // A stop event means the stream has ended, so start + // over after passing on the stop message. + this.stream = InnerStreamState::Stopped; + continue; + } + return Poll::Ready(Some(Ok(FollowStreamMsg::Event(ev)))); + } + Poll::Ready(Some(Err(e))) => { + // Re-start if a reconnecting backend was enabled. + if e.is_disconnected_will_reconnect() { + this.stream = InnerStreamState::Stopped; + continue; + } + + // Finish forever if there's an error, passing it on. 
+ this.stream = InnerStreamState::Finished; + return Poll::Ready(Some(Err(e))); + } + } + } + InnerStreamState::Stopped => { + this.stream = InnerStreamState::New; + return Poll::Ready(Some(Ok(FollowStreamMsg::Event(FollowEvent::Stop)))); + } + InnerStreamState::Finished => { + return Poll::Ready(None); + } + } + } + } +} + +#[cfg(test)] +pub(super) mod test_utils { + use super::*; + use crate::config::substrate::H256; + use std::sync::Arc; + use std::sync::atomic::{AtomicUsize, Ordering}; + use subxt_rpcs::methods::chain_head::{BestBlockChanged, Finalized, Initialized, NewBlock}; + + /// Given some events, returns a follow stream getter that we can use in + /// place of the usual RPC method. + pub fn test_stream_getter(events: F) -> FollowEventStreamGetter + where + Hash: Send + 'static, + F: Fn() -> I + Send + 'static, + I: IntoIterator, BackendError>>, + { + let start_idx = Arc::new(AtomicUsize::new(0)); + + Box::new(move || { + // Start the events from where we left off last time. + let start_idx = start_idx.clone(); + let this_idx = start_idx.load(Ordering::Relaxed); + let events: Vec<_> = events().into_iter().skip(this_idx).collect(); + + Box::pin(async move { + // Increment start_idx for each event we see, so that if we get + // the stream again, we get only the remaining events for it. 
+ let stream = futures::stream::iter(events).map(move |ev| { + start_idx.fetch_add(1, Ordering::Relaxed); + ev + }); + + let stream: FollowEventStream = Box::pin(stream); + Ok((stream, format!("sub_id_{this_idx}"))) + }) + }) + } + + /// An initialized event + pub fn ev_initialized(n: u64) -> FollowEvent { + FollowEvent::Initialized(Initialized { + finalized_block_hashes: vec![H256::from_low_u64_le(n)], + finalized_block_runtime: None, + }) + } + + /// A new block event + pub fn ev_new_block(parent_n: u64, n: u64) -> FollowEvent { + FollowEvent::NewBlock(NewBlock { + parent_block_hash: H256::from_low_u64_le(parent_n), + block_hash: H256::from_low_u64_le(n), + new_runtime: None, + }) + } + + /// A best block event + pub fn ev_best_block(n: u64) -> FollowEvent { + FollowEvent::BestBlockChanged(BestBlockChanged { + best_block_hash: H256::from_low_u64_le(n), + }) + } + + /// A finalized event + pub fn ev_finalized( + finalized_ns: impl IntoIterator, + pruned_ns: impl IntoIterator, + ) -> FollowEvent { + FollowEvent::Finalized(Finalized { + finalized_block_hashes: finalized_ns + .into_iter() + .map(H256::from_low_u64_le) + .collect(), + pruned_block_hashes: pruned_ns.into_iter().map(H256::from_low_u64_le).collect(), + }) + } +} + +#[cfg(test)] +pub mod test { + use super::*; + use test_utils::{ev_initialized, ev_new_block, test_stream_getter}; + + #[tokio::test] + async fn follow_stream_provides_messages_until_error() { + // The events we'll get back on the stream. + let stream_getter = test_stream_getter(|| { + [ + Ok(ev_initialized(1)), + // Stop should lead to a drop and resubscribe: + Ok(FollowEvent::Stop), + Ok(FollowEvent::Stop), + Ok(ev_new_block(1, 2)), + // Nothing should be emitted after an error: + Err(BackendError::Other("ended".to_owned())), + Ok(ev_new_block(2, 3)), + ] + }); + + let s = FollowStream::new(stream_getter); + let out: Vec<_> = s.filter_map(async |e| e.ok()).collect().await; + + // The expected response, given the above. 
+ assert_eq!( + out, + vec![ + FollowStreamMsg::Ready("sub_id_0".to_owned()), + FollowStreamMsg::Event(ev_initialized(1)), + FollowStreamMsg::Event(FollowEvent::Stop), + FollowStreamMsg::Ready("sub_id_2".to_owned()), + FollowStreamMsg::Event(FollowEvent::Stop), + FollowStreamMsg::Ready("sub_id_3".to_owned()), + FollowStreamMsg::Event(ev_new_block(1, 2)), + ] + ); + } +} diff --git a/new/src/backend/chain_head/follow_stream_driver.rs b/new/src/backend/chain_head/follow_stream_driver.rs new file mode 100644 index 0000000000..f1ff507729 --- /dev/null +++ b/new/src/backend/chain_head/follow_stream_driver.rs @@ -0,0 +1,755 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use super::follow_stream_unpin::{BlockRef, FollowStreamMsg, FollowStreamUnpin}; +use crate::config::Hash; +use crate::error::{BackendError, RpcError}; +use futures::stream::{Stream, StreamExt}; +use std::collections::{HashMap, HashSet, VecDeque}; +use std::ops::DerefMut; +use std::pin::Pin; +use std::sync::{Arc, Mutex}; +use std::task::{Context, Poll, Waker}; +use subxt_rpcs::methods::chain_head::{FollowEvent, Initialized, RuntimeEvent}; + +/// A `Stream` which builds on `FollowStreamDriver`, and allows multiple subscribers to obtain events +/// from the single underlying subscription (each being provided an `Initialized` message and all new +/// blocks since then, as if they were each creating a unique `chainHead_follow` subscription). This +/// is the "top" layer of our follow stream subscriptions, and the one that's interacted with elsewhere. +#[derive(Debug)] +pub struct FollowStreamDriver { + inner: FollowStreamUnpin, + shared: Shared, +} + +impl FollowStreamDriver { + /// Create a new [`FollowStreamDriver`]. This must be polled by some executor + /// in order for any progress to be made. Things can subscribe to events. 
+ pub fn new(follow_unpin: FollowStreamUnpin) -> Self { + Self { + inner: follow_unpin, + shared: Shared::default(), + } + } + + /// Return a handle from which we can create new subscriptions to follow events. + pub fn handle(&self) -> FollowStreamDriverHandle { + FollowStreamDriverHandle { + shared: self.shared.clone(), + } + } +} + +impl Stream for FollowStreamDriver { + type Item = Result<(), BackendError>; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match self.inner.poll_next_unpin(cx) { + Poll::Pending => Poll::Pending, + Poll::Ready(None) => { + // Mark ourselves as done so that everything can end. + self.shared.done(); + Poll::Ready(None) + } + Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))), + Poll::Ready(Some(Ok(item))) => { + // Push item to any subscribers. + self.shared.push_item(item); + Poll::Ready(Some(Ok(()))) + } + } + } +} + +/// A handle that can be used to create subscribers, but that doesn't +/// itself subscribe to events. +#[derive(Debug, Clone)] +pub struct FollowStreamDriverHandle { + shared: Shared, +} + +impl FollowStreamDriverHandle { + /// Subscribe to follow events. + pub fn subscribe(&self) -> FollowStreamDriverSubscription { + self.shared.subscribe() + } +} + +/// A subscription to events from the [`FollowStreamDriver`]. All subscriptions +/// begin first with a `Ready` event containing the current subscription ID, and +/// then with an `Initialized` event containing the latest finalized block and latest +/// runtime information, and then any new/best block events and so on received since +/// the latest finalized block. 
+#[derive(Debug)] +pub struct FollowStreamDriverSubscription { + id: usize, + done: bool, + shared: Shared, + local_items: VecDeque>>, +} + +impl Stream for FollowStreamDriverSubscription { + type Item = FollowStreamMsg>; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + if self.done { + return Poll::Ready(None); + } + + loop { + if let Some(item) = self.local_items.pop_front() { + return Poll::Ready(Some(item)); + } + + let items = self.shared.take_items_and_save_waker(self.id, cx.waker()); + + // If no items left, mark locally as done (to avoid further locking) + // and return None to signal done-ness. + let Some(items) = items else { + self.done = true; + return Poll::Ready(None); + }; + + // No items? We've saved the waker so we'll be told when more come. + // Else, save the items locally and loop around to pop from them. + if items.is_empty() { + return Poll::Pending; + } else { + self.local_items = items; + } + } + } +} + +impl FollowStreamDriverSubscription { + /// Return the current subscription ID. If the subscription has stopped, then this will + /// wait until a new subscription has started with a new ID. + pub async fn subscription_id(self) -> Option { + let ready_event = self + .skip_while(|ev| std::future::ready(!matches!(ev, FollowStreamMsg::Ready(_)))) + .next() + .await?; + + match ready_event { + FollowStreamMsg::Ready(sub_id) => Some(sub_id), + _ => None, + } + } + + /// Subscribe to the follow events, ignoring any other messages. + pub fn events(self) -> impl Stream>> + Send + Sync { + self.filter_map(|ev| std::future::ready(ev.into_event())) + } +} + +impl Clone for FollowStreamDriverSubscription { + fn clone(&self) -> Self { + self.shared.subscribe() + } +} + +impl Drop for FollowStreamDriverSubscription { + fn drop(&mut self) { + self.shared.remove_sub(self.id); + } +} + +/// Locked shared state. 
The driver stream will access this state to push +/// events to any subscribers, and subscribers will access it to pull the +/// events destined for themselves. +#[derive(Debug, Clone)] +struct Shared(Arc>>); + +#[derive(Debug)] +struct SharedState { + done: bool, + next_id: usize, + subscribers: HashMap>, + /// Keep a buffer of all events that should be handed to a new subscription. + block_events_for_new_subscriptions: VecDeque>>, + // Keep track of the subscription ID we send out on new subs. + current_subscription_id: Option, + // Keep track of the init message we send out on new subs. + current_init_message: Option>>, + // Runtime events by block hash; we need to track these to know + // whether the runtime has changed when we see a finalized block notification. + seen_runtime_events: HashMap, +} + +impl Default for Shared { + fn default() -> Self { + Shared(Arc::new(Mutex::new(SharedState { + next_id: 1, + done: false, + subscribers: HashMap::new(), + current_init_message: None, + current_subscription_id: None, + seen_runtime_events: HashMap::new(), + block_events_for_new_subscriptions: VecDeque::new(), + }))) + } +} + +impl Shared { + /// Set the shared state to "done"; no more items will be handed to it. + pub fn done(&self) { + let mut shared = self.0.lock().unwrap(); + shared.done = true; + + // Wake up all subscribers so they get notified that the backend was closed + for details in shared.subscribers.values_mut() { + if let Some(waker) = details.waker.take() { + waker.wake(); + } + } + } + + /// Cleanup a subscription. + pub fn remove_sub(&self, sub_id: usize) { + let mut shared = self.0.lock().unwrap(); + shared.subscribers.remove(&sub_id); + } + + /// Take items for some subscription ID and save the waker. 
+ pub fn take_items_and_save_waker( + &self, + sub_id: usize, + waker: &Waker, + ) -> Option>>> { + let mut shared = self.0.lock().unwrap(); + + let is_done = shared.done; + let details = shared.subscribers.get_mut(&sub_id)?; + + // no more items to pull, and stream closed, so return None. + if details.items.is_empty() && is_done { + return None; + } + + // else, take whatever items, and save the waker if not done yet. + let items = std::mem::take(&mut details.items); + if !is_done { + details.waker = Some(waker.clone()); + } + Some(items) + } + + /// Push a new item out to subscribers. + pub fn push_item(&self, item: FollowStreamMsg>) { + let mut shared = self.0.lock().unwrap(); + let shared = shared.deref_mut(); + + // broadcast item to subscribers: + for details in shared.subscribers.values_mut() { + details.items.push_back(item.clone()); + if let Some(waker) = details.waker.take() { + waker.wake(); + } + } + + // Keep our buffer of ready/block events up-to-date: + match item { + FollowStreamMsg::Ready(sub_id) => { + // Set new subscription ID when it comes in. + shared.current_subscription_id = Some(sub_id); + } + FollowStreamMsg::Event(FollowEvent::Initialized(ev)) => { + // New subscriptions will be given this init message: + shared.current_init_message = Some(ev.clone()); + // Clear block cache (since a new finalized block hash is seen): + shared.block_events_for_new_subscriptions.clear(); + } + FollowStreamMsg::Event(FollowEvent::Finalized(finalized_ev)) => { + // Update the init message that we'll hand out to new subscriptions. If the init message + // is `None` for some reason, we just ignore this step. + if let Some(init_message) = &mut shared.current_init_message { + // Find the latest runtime update that's been finalized. 
+ let newest_runtime = finalized_ev + .finalized_block_hashes + .iter() + .rev() + .filter_map(|h| shared.seen_runtime_events.get(&h.hash()).cloned()) + .next(); + + shared.seen_runtime_events.clear(); + + init_message + .finalized_block_hashes + .clone_from(&finalized_ev.finalized_block_hashes); + + if let Some(runtime_ev) = newest_runtime { + init_message.finalized_block_runtime = Some(runtime_ev); + } + } + + // The last finalized block will be reported as Initialized by our driver, + // therefore there is no need to report NewBlock and BestBlock events for it. + // If the Finalized event reported multiple finalized hashes, we only care about + // the state at the head of the chain, therefore it is correct to remove those as well. + // Idem for the pruned hashes; they will never be reported again and we remove + // them from the window of events. + let to_remove: HashSet = finalized_ev + .finalized_block_hashes + .iter() + .chain(finalized_ev.pruned_block_hashes.iter()) + .map(|h| h.hash()) + .collect(); + + shared + .block_events_for_new_subscriptions + .retain(|ev| match ev { + FollowEvent::NewBlock(new_block_ev) => { + !to_remove.contains(&new_block_ev.block_hash.hash()) + } + FollowEvent::BestBlockChanged(best_block_ev) => { + !to_remove.contains(&best_block_ev.best_block_hash.hash()) + } + _ => true, + }); + } + FollowStreamMsg::Event(FollowEvent::NewBlock(new_block_ev)) => { + // If a new runtime is seen, note it so that when a block is finalized, we + // can associate that with a runtime update having happened. 
+ if let Some(runtime_event) = &new_block_ev.new_runtime { + shared + .seen_runtime_events + .insert(new_block_ev.block_hash.hash(), runtime_event.clone()); + } + + shared + .block_events_for_new_subscriptions + .push_back(FollowEvent::NewBlock(new_block_ev)); + } + FollowStreamMsg::Event(ev @ FollowEvent::BestBlockChanged(_)) => { + shared.block_events_for_new_subscriptions.push_back(ev); + } + FollowStreamMsg::Event(FollowEvent::Stop) => { + // On a stop event, clear everything. Wait for resubscription and new ready/initialised events. + shared.block_events_for_new_subscriptions.clear(); + shared.current_subscription_id = None; + shared.current_init_message = None; + } + _ => { + // We don't buffer any other events. + } + } + } + + /// Create a new subscription. + pub fn subscribe(&self) -> FollowStreamDriverSubscription { + let mut shared = self.0.lock().unwrap(); + + let id = shared.next_id; + shared.next_id += 1; + + shared.subscribers.insert( + id, + SubscriberDetails { + items: VecDeque::new(), + waker: None, + }, + ); + + // Any new subscription should start with a "Ready" message and then an "Initialized" + // message, and then any non-finalized block events since that. If these don't exist, + // it means the subscription is currently stopped, and we should expect new Ready/Init + // messages anyway once it restarts. 
+ let mut local_items = VecDeque::new(); + if let Some(sub_id) = &shared.current_subscription_id { + local_items.push_back(FollowStreamMsg::Ready(sub_id.clone())); + } + if let Some(init_msg) = &shared.current_init_message { + local_items.push_back(FollowStreamMsg::Event(FollowEvent::Initialized( + init_msg.clone(), + ))); + } + for ev in &shared.block_events_for_new_subscriptions { + local_items.push_back(FollowStreamMsg::Event(ev.clone())); + } + + drop(shared); + + FollowStreamDriverSubscription { + id, + done: false, + shared: self.clone(), + local_items, + } + } +} + +/// Details for a given subscriber: any items it's not yet claimed, +/// and a way to wake it up when there are more items for it. +#[derive(Debug)] +struct SubscriberDetails { + items: VecDeque>>, + waker: Option, +} + +/// A stream that subscribes to finalized blocks +/// and indicates whether a block was missed if was restarted. +#[derive(Debug)] +pub struct FollowStreamFinalizedHeads { + stream: FollowStreamDriverSubscription, + sub_id: Option, + last_seen_block: Option>, + f: F, + is_done: bool, +} + +impl Unpin for FollowStreamFinalizedHeads {} + +impl FollowStreamFinalizedHeads +where + H: Hash, + F: Fn(FollowEvent>) -> Vec>, +{ + pub fn new(stream: FollowStreamDriverSubscription, f: F) -> Self { + Self { + stream, + sub_id: None, + last_seen_block: None, + f, + is_done: false, + } + } +} + +impl Stream for FollowStreamFinalizedHeads +where + H: Hash, + F: Fn(FollowEvent>) -> Vec>, +{ + type Item = Result<(String, Vec>), BackendError>; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + if self.is_done { + return Poll::Ready(None); + } + + loop { + let Some(ev) = futures::ready!(self.stream.poll_next_unpin(cx)) else { + self.is_done = true; + return Poll::Ready(None); + }; + + let block_refs = match ev { + FollowStreamMsg::Ready(sub_id) => { + self.sub_id = Some(sub_id); + continue; + } + FollowStreamMsg::Event(FollowEvent::Finalized(finalized)) => { + 
self.last_seen_block = finalized.finalized_block_hashes.last().cloned(); + + (self.f)(FollowEvent::Finalized(finalized)) + } + FollowStreamMsg::Event(FollowEvent::Initialized(mut init)) => { + let prev = self.last_seen_block.take(); + self.last_seen_block = init.finalized_block_hashes.last().cloned(); + + if let Some(p) = prev { + let Some(pos) = init + .finalized_block_hashes + .iter() + .position(|b| b.hash() == p.hash()) + else { + return Poll::Ready(Some(Err(RpcError::ClientError( + subxt_rpcs::Error::DisconnectedWillReconnect( + "Missed at least one block when the connection was lost" + .to_owned(), + ), + ) + .into()))); + }; + + // If we got older blocks than `prev`, we need to remove them + // because they should already have been sent at this point. + init.finalized_block_hashes.drain(0..=pos); + } + + (self.f)(FollowEvent::Initialized(init)) + } + FollowStreamMsg::Event(ev) => (self.f)(ev), + }; + + if block_refs.is_empty() { + continue; + } + + let sub_id = self + .sub_id + .clone() + .expect("Ready is always emitted before any other event"); + + return Poll::Ready(Some(Ok((sub_id, block_refs)))); + } + } +} + +#[cfg(test)] +mod test_utils { + use super::super::follow_stream_unpin::test_utils::test_unpin_stream_getter; + use super::*; + + /// Return a `FollowStreamDriver` + pub fn test_follow_stream_driver_getter( + events: F, + max_life: usize, + ) -> FollowStreamDriver + where + H: Hash + 'static, + F: Fn() -> I + Send + 'static, + I: IntoIterator, BackendError>>, + { + let (stream, _) = test_unpin_stream_getter(events, max_life); + FollowStreamDriver::new(stream) + } +} + +#[cfg(test)] +mod test { + use futures::TryStreamExt; + use primitive_types::H256; + + use super::super::follow_stream::test_utils::{ + ev_best_block, ev_finalized, ev_initialized, ev_new_block, + }; + use super::super::follow_stream_unpin::test_utils::{ + ev_best_block_ref, ev_finalized_ref, ev_initialized_ref, ev_new_block_ref, + }; + use 
super::test_utils::test_follow_stream_driver_getter; + use super::*; + + #[test] + fn follow_stream_driver_is_sendable() { + fn assert_send(_: T) {} + let stream_getter = test_follow_stream_driver_getter(|| [Ok(ev_initialized(1))], 10); + assert_send(stream_getter); + } + + #[tokio::test] + async fn subscribers_all_receive_events_and_finish_gracefully_on_error() { + let mut driver = test_follow_stream_driver_getter( + || { + [ + Ok(ev_initialized(0)), + Ok(ev_new_block(0, 1)), + Ok(ev_best_block(1)), + Ok(ev_finalized([1], [])), + Err(BackendError::Other("ended".to_owned())), + ] + }, + 10, + ); + + let handle = driver.handle(); + + let a = handle.subscribe(); + let b = handle.subscribe(); + let c = handle.subscribe(); + + // Drive to completion (the sort of real life usage I'd expect): + tokio::spawn(async move { while driver.next().await.is_some() {} }); + + let a_vec: Vec<_> = a.collect().await; + let b_vec: Vec<_> = b.collect().await; + let c_vec: Vec<_> = c.collect().await; + + let expected = vec![ + FollowStreamMsg::Ready("sub_id_0".into()), + FollowStreamMsg::Event(ev_initialized_ref(0)), + FollowStreamMsg::Event(ev_new_block_ref(0, 1)), + FollowStreamMsg::Event(ev_best_block_ref(1)), + FollowStreamMsg::Event(ev_finalized_ref([1])), + ]; + + assert_eq!(a_vec, expected); + assert_eq!(b_vec, expected); + assert_eq!(c_vec, expected); + } + + #[tokio::test] + async fn subscribers_receive_block_events_from_last_finalised() { + let mut driver = test_follow_stream_driver_getter( + || { + [ + Ok(ev_initialized(0)), + Ok(ev_new_block(0, 1)), + Ok(ev_best_block(1)), + Ok(ev_finalized([1], [])), + Ok(ev_new_block(1, 2)), + Ok(ev_new_block(2, 3)), + Err(BackendError::Other("ended".to_owned())), + ] + }, + 10, + ); + + // Skip past ready, init, new, best events. 
+ let _r = driver.next().await.unwrap(); + let _i0 = driver.next().await.unwrap(); + let _n1 = driver.next().await.unwrap(); + let _b1 = driver.next().await.unwrap(); + + // THEN subscribe; subscription should still receive them: + let evs: Vec<_> = driver.handle().subscribe().take(4).collect().await; + let expected = vec![ + FollowStreamMsg::Ready("sub_id_0".into()), + FollowStreamMsg::Event(ev_initialized_ref(0)), + FollowStreamMsg::Event(ev_new_block_ref(0, 1)), + FollowStreamMsg::Event(ev_best_block_ref(1)), + ]; + assert_eq!(evs, expected); + + // Skip past finalized 1, new 2, new 3 events + let _f1 = driver.next().await.unwrap(); + let _n2 = driver.next().await.unwrap(); + let _n3 = driver.next().await.unwrap(); + + // THEN subscribe again; new subs will see an updated initialized message + // with the latest finalized block hash. + let evs: Vec<_> = driver.handle().subscribe().take(4).collect().await; + let expected = vec![ + FollowStreamMsg::Ready("sub_id_0".into()), + FollowStreamMsg::Event(ev_initialized_ref(1)), + FollowStreamMsg::Event(ev_new_block_ref(1, 2)), + FollowStreamMsg::Event(ev_new_block_ref(2, 3)), + ]; + assert_eq!(evs, expected); + } + + #[tokio::test] + async fn subscribers_receive_new_blocks_before_subscribing() { + let mut driver = test_follow_stream_driver_getter( + || { + [ + Ok(ev_initialized(0)), + Ok(ev_new_block(0, 1)), + Ok(ev_best_block(1)), + Ok(ev_new_block(1, 2)), + Ok(ev_new_block(2, 3)), + Ok(ev_finalized([1], [])), + Err(BackendError::Other("ended".to_owned())), + ] + }, + 10, + ); + + // Skip to the first finalized block F1. + let _r = driver.next().await.unwrap(); + let _i0 = driver.next().await.unwrap(); + let _n1 = driver.next().await.unwrap(); + let _b1 = driver.next().await.unwrap(); + let _n2 = driver.next().await.unwrap(); + let _n3 = driver.next().await.unwrap(); + let _f1 = driver.next().await.unwrap(); + + // THEN subscribe; and make sure new block 1 and 2 are received. 
+ let evs: Vec<_> = driver.handle().subscribe().take(4).collect().await; + let expected = vec![ + FollowStreamMsg::Ready("sub_id_0".into()), + FollowStreamMsg::Event(ev_initialized_ref(1)), + FollowStreamMsg::Event(ev_new_block_ref(1, 2)), + FollowStreamMsg::Event(ev_new_block_ref(2, 3)), + ]; + assert_eq!(evs, expected); + } + + #[tokio::test] + async fn subscribe_finalized_blocks_restart_works() { + let mut driver = test_follow_stream_driver_getter( + || { + [ + Ok(ev_initialized(0)), + Ok(ev_new_block(0, 1)), + Ok(ev_best_block(1)), + Ok(ev_finalized([1], [])), + Ok(FollowEvent::Stop), + Ok(ev_initialized(1)), + Ok(ev_finalized([2], [])), + Err(BackendError::Other("ended".to_owned())), + ] + }, + 10, + ); + + let handle = driver.handle(); + + tokio::spawn(async move { while driver.next().await.is_some() {} }); + + let f = |ev| match ev { + FollowEvent::Finalized(ev) => ev.finalized_block_hashes, + FollowEvent::Initialized(ev) => ev.finalized_block_hashes, + _ => vec![], + }; + + let stream = FollowStreamFinalizedHeads::new(handle.subscribe(), f); + let evs: Vec<_> = stream.try_collect().await.unwrap(); + + let expected = vec![ + ( + "sub_id_0".to_string(), + vec![BlockRef::new(H256::from_low_u64_le(0))], + ), + ( + "sub_id_0".to_string(), + vec![BlockRef::new(H256::from_low_u64_le(1))], + ), + ( + "sub_id_5".to_string(), + vec![BlockRef::new(H256::from_low_u64_le(2))], + ), + ]; + assert_eq!(evs, expected); + } + + #[tokio::test] + async fn subscribe_finalized_blocks_restart_with_missed_blocks() { + let mut driver = test_follow_stream_driver_getter( + || { + [ + Ok(ev_initialized(0)), + Ok(FollowEvent::Stop), + // Emulate that we missed some blocks. 
+ Ok(ev_initialized(13)), + Ok(ev_finalized([14], [])), + Err(BackendError::Other("ended".to_owned())), + ] + }, + 10, + ); + + let handle = driver.handle(); + + tokio::spawn(async move { while driver.next().await.is_some() {} }); + + let f = |ev| match ev { + FollowEvent::Finalized(ev) => ev.finalized_block_hashes, + FollowEvent::Initialized(ev) => ev.finalized_block_hashes, + _ => vec![], + }; + + let evs: Vec<_> = FollowStreamFinalizedHeads::new(handle.subscribe(), f) + .collect() + .await; + + assert_eq!( + evs[0].as_ref().unwrap(), + &( + "sub_id_0".to_string(), + vec![BlockRef::new(H256::from_low_u64_le(0))] + ) + ); + assert!( + matches!(&evs[1], Err(BackendError::Rpc(RpcError::ClientError(subxt_rpcs::Error::DisconnectedWillReconnect(e)))) if e.contains("Missed at least one block when the connection was lost")) + ); + assert_eq!( + evs[2].as_ref().unwrap(), + &( + "sub_id_2".to_string(), + vec![BlockRef::new(H256::from_low_u64_le(14))] + ) + ); + } +} diff --git a/new/src/backend/chain_head/follow_stream_unpin.rs b/new/src/backend/chain_head/follow_stream_unpin.rs new file mode 100644 index 0000000000..3be783552d --- /dev/null +++ b/new/src/backend/chain_head/follow_stream_unpin.rs @@ -0,0 +1,813 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use super::ChainHeadRpcMethods; +use super::follow_stream::FollowStream; +use crate::config::{Config, Hash, HashFor, RpcConfigFor}; +use crate::error::BackendError; +use futures::stream::{FuturesUnordered, Stream, StreamExt}; +use subxt_rpcs::methods::chain_head::{ + BestBlockChanged, Finalized, FollowEvent, Initialized, NewBlock, +}; + +use std::collections::{HashMap, HashSet}; +use std::future::Future; +use std::pin::Pin; +use std::sync::{Arc, Mutex}; +use std::task::{Context, Poll, Waker}; + +/// The type of stream item. 
+pub use super::follow_stream::FollowStreamMsg; + +/// A `Stream` which builds on `FollowStream`, and handles pinning. It replaces any block hash seen in +/// the follow events with a `BlockRef` which, when all clones are dropped, will lead to an "unpin" call +/// for that block hash being queued. It will also automatically unpin any blocks that exceed a given max +/// age, to try and prevent the underlying stream from ending (and _all_ blocks from being unpinned as a +/// result). Put simply, it tries to keep every block pinned as long as possible until the block is no longer +/// used anywhere. +#[derive(Debug)] +pub struct FollowStreamUnpin { + // The underlying stream of events. + inner: FollowStream, + // A method to call to unpin a block, given a block hash and a subscription ID. + unpin_method: UnpinMethodHolder, + // Futures for sending unpin events that we'll poll to completion as + // part of polling the stream as a whole. + unpin_futs: FuturesUnordered, + // Each time a new finalized block is seen, we give it an age of `next_rel_block_age`, + // and then increment this ready for the next finalized block. So, the first finalized + // block will have an age of 0, the next 1, 2, 3 and so on. We can then use `max_block_life` + // to say "unpin all blocks with an age < (next_rel_block_age-1) - max_block_life". + next_rel_block_age: usize, + // The latest ID of the FollowStream subscription, which we can use + // to unpin blocks. + subscription_id: Option>, + // The longest period a block can be pinned for. + max_block_life: usize, + // The currently seen and pinned blocks. + pinned: HashMap>, + // Shared state about blocks we've flagged to unpin from elsewhere + unpin_flags: UnpinFlags, +} + +// Just a wrapper to make implementing debug on the whole thing easier. 
+struct UnpinMethodHolder(UnpinMethod); +impl std::fmt::Debug for UnpinMethodHolder { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "UnpinMethodHolder(Box) -> UnpinFut>)" + ) + } +} + +/// The type of the unpin method that we need to provide. +pub type UnpinMethod = Box) -> UnpinFut + Send>; + +/// The future returned from [`UnpinMethod`]. +pub type UnpinFut = Pin + Send + 'static>>; + +impl std::marker::Unpin for FollowStreamUnpin {} + +impl Stream for FollowStreamUnpin { + type Item = Result>, BackendError>; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut this = self.as_mut(); + + loop { + // Poll any queued unpin tasks. + let unpin_futs_are_pending = match this.unpin_futs.poll_next_unpin(cx) { + Poll::Ready(Some(())) => continue, + Poll::Ready(None) => false, + Poll::Pending => true, + }; + + // Poll the inner stream for the next event. + let Poll::Ready(ev) = this.inner.poll_next_unpin(cx) else { + return Poll::Pending; + }; + + let Some(ev) = ev else { + // if the stream is done, but `unpin_futs` are still pending, then + // return pending here so that they are still driven to completion. + // Else, return `Ready(None)` to signal nothing left to do. + return match unpin_futs_are_pending { + true => Poll::Pending, + false => Poll::Ready(None), + }; + }; + + // Error? just return it and do nothing further. + let ev = match ev { + Ok(ev) => ev, + Err(e) => { + return Poll::Ready(Some(Err(e))); + } + }; + + // React to any actual FollowEvent we get back. + let ev = match ev { + FollowStreamMsg::Ready(subscription_id) => { + // update the subscription ID we'll use to unpin things. + this.subscription_id = Some(subscription_id.clone().into()); + + FollowStreamMsg::Ready(subscription_id) + } + FollowStreamMsg::Event(FollowEvent::Initialized(details)) => { + let mut finalized_block_hashes = + Vec::with_capacity(details.finalized_block_hashes.len()); + + // Pin each of the finalized blocks. 
None of them will show up again (except as a + // parent block), and so they can all be unpinned immediately at any time. Increment + // the block age for each one, so that older finalized blocks are pruned first. + for finalized_block in &details.finalized_block_hashes { + let rel_block_age = this.next_rel_block_age; + let block_ref = + this.pin_unpinnable_block_at(rel_block_age, *finalized_block); + + finalized_block_hashes.push(block_ref); + this.next_rel_block_age += 1; + } + + FollowStreamMsg::Event(FollowEvent::Initialized(Initialized { + finalized_block_hashes, + finalized_block_runtime: details.finalized_block_runtime, + })) + } + FollowStreamMsg::Event(FollowEvent::NewBlock(details)) => { + // One bigger than our parent, and if no parent seen (maybe it was + // unpinned already), then one bigger than the last finalized block num + // as a best guess. + let parent_rel_block_age = this + .pinned + .get(&details.parent_block_hash) + .map(|p| p.rel_block_age) + .unwrap_or(this.next_rel_block_age.saturating_sub(1)); + + let block_ref = this.pin_block_at(parent_rel_block_age + 1, details.block_hash); + let parent_block_ref = + this.pin_block_at(parent_rel_block_age, details.parent_block_hash); + + FollowStreamMsg::Event(FollowEvent::NewBlock(NewBlock { + block_hash: block_ref, + parent_block_hash: parent_block_ref, + new_runtime: details.new_runtime, + })) + } + FollowStreamMsg::Event(FollowEvent::BestBlockChanged(details)) => { + // We expect this block to already exist, so it'll keep its existing block_num, + // but worst case it'll just get the current finalized block_num + 1. 
+ let rel_block_age = this.next_rel_block_age; + let block_ref = this.pin_block_at(rel_block_age, details.best_block_hash); + + FollowStreamMsg::Event(FollowEvent::BestBlockChanged(BestBlockChanged { + best_block_hash: block_ref, + })) + } + FollowStreamMsg::Event(FollowEvent::Finalized(details)) => { + let finalized_block_refs: Vec<_> = details + .finalized_block_hashes + .into_iter() + .enumerate() + .map(|(idx, hash)| { + // These blocks _should_ exist already and so will have a known block num, + // but if they don't, we just increment the num from the last finalized block + // we saw, which should be accurate. + // + // `pin_unpinnable_block_at` indicates that the block will not show up in future events + // (They will show up as a parent block, but we don't care about that right now). + let rel_block_age = this.next_rel_block_age + idx; + this.pin_unpinnable_block_at(rel_block_age, hash) + }) + .collect(); + + // Our relative block height is increased by however many finalized + // blocks we've seen. + this.next_rel_block_age += finalized_block_refs.len(); + + let pruned_block_refs: Vec<_> = details + .pruned_block_hashes + .into_iter() + .map(|hash| { + // We should know about these, too, and if not we set their age to last_finalized + 1. + // + // `pin_unpinnable_block_at` indicates that the block will not show up in future events. + let rel_block_age = this.next_rel_block_age; + this.pin_unpinnable_block_at(rel_block_age, hash) + }) + .collect(); + + // At this point, we also check to see which blocks we should submit unpin events + // for. We will unpin: + // - Any block that's older than the max age. + // - Any block that has no references left (ie has been dropped) that _also_ has + // showed up in the pruned list in a finalized event (so it will never be in another event). 
+ this.unpin_blocks(cx.waker()); + + FollowStreamMsg::Event(FollowEvent::Finalized(Finalized { + finalized_block_hashes: finalized_block_refs, + pruned_block_hashes: pruned_block_refs, + })) + } + FollowStreamMsg::Event(FollowEvent::Stop) => { + // clear out "old" things that are no longer applicable since + // the subscription has ended (a new one will be created under the hood, at + // which point we'll get given a new subscription ID. + this.subscription_id = None; + this.pinned.clear(); + this.unpin_futs.clear(); + this.unpin_flags.lock().unwrap().clear(); + this.next_rel_block_age = 0; + + FollowStreamMsg::Event(FollowEvent::Stop) + } + // These events aren't interesting; we just forward them on: + FollowStreamMsg::Event(FollowEvent::OperationBodyDone(details)) => { + FollowStreamMsg::Event(FollowEvent::OperationBodyDone(details)) + } + FollowStreamMsg::Event(FollowEvent::OperationCallDone(details)) => { + FollowStreamMsg::Event(FollowEvent::OperationCallDone(details)) + } + FollowStreamMsg::Event(FollowEvent::OperationStorageItems(details)) => { + FollowStreamMsg::Event(FollowEvent::OperationStorageItems(details)) + } + FollowStreamMsg::Event(FollowEvent::OperationWaitingForContinue(details)) => { + FollowStreamMsg::Event(FollowEvent::OperationWaitingForContinue(details)) + } + FollowStreamMsg::Event(FollowEvent::OperationStorageDone(details)) => { + FollowStreamMsg::Event(FollowEvent::OperationStorageDone(details)) + } + FollowStreamMsg::Event(FollowEvent::OperationInaccessible(details)) => { + FollowStreamMsg::Event(FollowEvent::OperationInaccessible(details)) + } + FollowStreamMsg::Event(FollowEvent::OperationError(details)) => { + FollowStreamMsg::Event(FollowEvent::OperationError(details)) + } + }; + + // Return our event. + return Poll::Ready(Some(Ok(ev))); + } + } +} + +impl FollowStreamUnpin { + /// Create a new [`FollowStreamUnpin`]. 
+ pub fn new( + follow_stream: FollowStream, + unpin_method: UnpinMethod, + max_block_life: usize, + ) -> Self { + Self { + inner: follow_stream, + unpin_method: UnpinMethodHolder(unpin_method), + max_block_life, + pinned: Default::default(), + subscription_id: None, + next_rel_block_age: 0, + unpin_flags: Default::default(), + unpin_futs: Default::default(), + } + } + + /// Create a new [`FollowStreamUnpin`] given the RPC methods. + pub fn from_methods( + follow_stream: FollowStream>, + methods: ChainHeadRpcMethods>, + max_block_life: usize, + ) -> FollowStreamUnpin> { + let unpin_method = Box::new(move |hash: HashFor, sub_id: Arc| { + let methods = methods.clone(); + let fut: UnpinFut = Box::pin(async move { + // We ignore any errors trying to unpin at the moment. + let _ = methods.chainhead_v1_unpin(&sub_id, hash).await; + }); + fut + }); + + FollowStreamUnpin::new(follow_stream, unpin_method, max_block_life) + } + + /// Is the block hash currently pinned. + pub fn is_pinned(&self, hash: &H) -> bool { + self.pinned.contains_key(hash) + } + + /// Pin a block, or return the reference to an already-pinned block. If the block has been registered to + /// be unpinned, we'll clear those flags, so that it won't be unpinned. If the unpin request has already + /// been sent though, then the block will be unpinned. + fn pin_block_at(&mut self, rel_block_age: usize, hash: H) -> BlockRef { + self.pin_block_at_setting_unpinnable_flag(rel_block_age, hash, false) + } + + /// Pin a block, or return the reference to an already-pinned block. + /// + /// This is the same as [`Self::pin_block_at`], except that it also marks the block as being unpinnable now, + /// which should be done for any block that will no longer be seen in future events. 
+ fn pin_unpinnable_block_at(&mut self, rel_block_age: usize, hash: H) -> BlockRef { + self.pin_block_at_setting_unpinnable_flag(rel_block_age, hash, true) + } + + fn pin_block_at_setting_unpinnable_flag( + &mut self, + rel_block_age: usize, + hash: H, + can_be_unpinned: bool, + ) -> BlockRef { + let entry = self + .pinned + .entry(hash) + // If there's already an entry, then clear any unpin_flags and update the + // can_be_unpinned status (this can become true but cannot become false again + // once true). + .and_modify(|entry| { + entry.can_be_unpinned = entry.can_be_unpinned || can_be_unpinned; + self.unpin_flags.lock().unwrap().remove(&hash); + }) + // If there's not an entry already, make one and return it. + .or_insert_with(|| PinnedDetails { + rel_block_age, + block_ref: BlockRef { + inner: Arc::new(BlockRefInner { + hash, + unpin_flags: self.unpin_flags.clone(), + }), + }, + can_be_unpinned, + }); + + entry.block_ref.clone() + } + + /// Unpin any blocks that are either too old, or have the unpin flag set and are old enough. + fn unpin_blocks(&mut self, waker: &Waker) { + let mut unpin_flags = self.unpin_flags.lock().unwrap(); + + // This gets the age of the last finalized block. + let rel_block_age = self.next_rel_block_age.saturating_sub(1); + + // If we asked to unpin and there was no subscription_id, then there's nothing we can do, + // and nothing will need unpinning now anyway. + let Some(sub_id) = &self.subscription_id else { + return; + }; + + let mut blocks_to_unpin = vec![]; + for (hash, details) in &self.pinned { + if rel_block_age.saturating_sub(details.rel_block_age) >= self.max_block_life + || (unpin_flags.contains(hash) && details.can_be_unpinned) + { + // The block is too old, or it's been flagged to be unpinned and won't be in a future + // backend event, so we can unpin it for real now. + blocks_to_unpin.push(*hash); + // Clear it from our unpin flags if present so that we don't try to unpin it again. 
+ unpin_flags.remove(hash); + } + } + + // Release our lock on unpin_flags ASAP. + drop(unpin_flags); + + // No need to call the waker etc if nothing to do: + if blocks_to_unpin.is_empty() { + return; + } + + for hash in blocks_to_unpin { + self.pinned.remove(&hash); + let fut = (self.unpin_method.0)(hash, sub_id.clone()); + self.unpin_futs.push(fut); + } + + // Any new futures pushed above need polling to start. We could + // just wait for the next stream event, but let's wake the task to + // have it polled sooner, just in case it's slow to receive things. + waker.wake_by_ref(); + } +} + +// The set of block hashes that can be unpinned when ready. +// BlockRefs write to this when they are dropped. +type UnpinFlags = Arc>>; + +#[derive(Debug)] +struct PinnedDetails { + /// Relatively speaking, how old is the block? When we start following + /// blocks, the first finalized block gets an age of 0, the second an age + /// of 1 and so on. + rel_block_age: usize, + /// A block ref we can hand out to keep blocks pinned. + /// Because we store one here until it's unpinned, the live count + /// will only drop to 1 when no external refs are left. + block_ref: BlockRef, + /// Has this block showed up in the list of pruned blocks, or has it + /// been finalized? In this case, it can now been pinned as it won't + /// show up again in future events (except as a "parent block" of some + /// new block, which we're currently ignoring). + can_be_unpinned: bool, +} + +/// All blocks reported will be wrapped in this. +#[derive(Debug, Clone)] +pub struct BlockRef { + inner: Arc>, +} + +#[derive(Debug)] +struct BlockRefInner { + hash: H, + unpin_flags: UnpinFlags, +} + +impl BlockRef { + /// For testing purposes only, create a BlockRef from a hash + /// that isn't pinned. + #[cfg(test)] + pub fn new(hash: H) -> Self { + BlockRef { + inner: Arc::new(BlockRefInner { + hash, + unpin_flags: Default::default(), + }), + } + } + + /// Return the hash for this block. 
+ pub fn hash(&self) -> H { + self.inner.hash + } +} + +impl PartialEq for BlockRef { + fn eq(&self, other: &Self) -> bool { + self.inner.hash == other.inner.hash + } +} + +impl PartialEq for BlockRef { + fn eq(&self, other: &H) -> bool { + &self.inner.hash == other + } +} + +impl Drop for BlockRef { + fn drop(&mut self) { + // PinnedDetails keeps one ref, so if this is the second ref, it's the + // only "external" one left and we should ask to unpin it now. if it's + // the only ref remaining, it means that it's already been unpinned, so + // nothing to do here anyway. + if Arc::strong_count(&self.inner) == 2 { + if let Ok(mut unpin_flags) = self.inner.unpin_flags.lock() { + unpin_flags.insert(self.inner.hash); + } + } + } +} + +#[cfg(test)] +pub(super) mod test_utils { + use super::super::follow_stream::{FollowStream, test_utils::test_stream_getter}; + use super::*; + use crate::config::substrate::H256; + + pub type UnpinRx = std::sync::mpsc::Receiver<(H, Arc)>; + + /// Get a [`FollowStreamUnpin`] from an iterator over events. + pub fn test_unpin_stream_getter( + events: F, + max_life: usize, + ) -> (FollowStreamUnpin, UnpinRx) + where + H: Hash + 'static, + F: Fn() -> I + Send + 'static, + I: IntoIterator, BackendError>>, + { + // Unpin requests will come here so that we can look out for them. + let (unpin_tx, unpin_rx) = std::sync::mpsc::channel(); + + let follow_stream = FollowStream::new(test_stream_getter(events)); + let unpin_method: UnpinMethod = Box::new(move |hash, sub_id| { + unpin_tx.send((hash, sub_id)).unwrap(); + Box::pin(std::future::ready(())) + }); + + let follow_unpin = FollowStreamUnpin::new(follow_stream, unpin_method, max_life); + (follow_unpin, unpin_rx) + } + + /// Assert that the unpinned blocks sent from the `UnpinRx` channel match the items given. 
+ pub fn assert_from_unpin_rx( + unpin_rx: &UnpinRx, + items: impl IntoIterator, + ) { + let expected_hashes = HashSet::::from_iter(items); + for i in 0..expected_hashes.len() { + let Ok((hash, _)) = unpin_rx.try_recv() else { + panic!("Another unpin event is expected, but failed to pull item {i} from channel"); + }; + assert!( + expected_hashes.contains(&hash), + "Hash {hash:?} was unpinned, but is not expected to have been" + ); + } + } + + /// An initialized event containing a BlockRef (useful for comparisons) + pub fn ev_initialized_ref(n: u64) -> FollowEvent> { + FollowEvent::Initialized(Initialized { + finalized_block_hashes: vec![BlockRef::new(H256::from_low_u64_le(n))], + finalized_block_runtime: None, + }) + } + + /// A new block event containing a BlockRef (useful for comparisons) + pub fn ev_new_block_ref(parent: u64, n: u64) -> FollowEvent> { + FollowEvent::NewBlock(NewBlock { + parent_block_hash: BlockRef::new(H256::from_low_u64_le(parent)), + block_hash: BlockRef::new(H256::from_low_u64_le(n)), + new_runtime: None, + }) + } + + /// A best block event containing a BlockRef (useful for comparisons) + pub fn ev_best_block_ref(n: u64) -> FollowEvent> { + FollowEvent::BestBlockChanged(BestBlockChanged { + best_block_hash: BlockRef::new(H256::from_low_u64_le(n)), + }) + } + + /// A finalized event containing a BlockRef (useful for comparisons) + pub fn ev_finalized_ref(ns: impl IntoIterator) -> FollowEvent> { + FollowEvent::Finalized(Finalized { + finalized_block_hashes: ns + .into_iter() + .map(|h| BlockRef::new(H256::from_low_u64_le(h))) + .collect(), + pruned_block_hashes: vec![], + }) + } +} + +#[cfg(test)] +mod test { + use super::super::follow_stream::test_utils::{ + ev_best_block, ev_finalized, ev_initialized, ev_new_block, + }; + use super::test_utils::{assert_from_unpin_rx, ev_new_block_ref, test_unpin_stream_getter}; + use super::*; + use crate::config::substrate::H256; + + #[tokio::test] + async fn hands_back_blocks() { + let (follow_unpin, _) = 
test_unpin_stream_getter( + || { + [ + Ok(ev_new_block(0, 1)), + Ok(ev_new_block(1, 2)), + Ok(ev_new_block(2, 3)), + Err(BackendError::Other("ended".to_owned())), + ] + }, + 10, + ); + + let out: Vec<_> = follow_unpin.filter_map(async |e| e.ok()).collect().await; + + assert_eq!( + out, + vec![ + FollowStreamMsg::Ready("sub_id_0".into()), + FollowStreamMsg::Event(ev_new_block_ref(0, 1)), + FollowStreamMsg::Event(ev_new_block_ref(1, 2)), + FollowStreamMsg::Event(ev_new_block_ref(2, 3)), + ] + ); + } + + #[tokio::test] + async fn unpins_initialized_block() { + let (mut follow_unpin, unpin_rx) = test_unpin_stream_getter( + || { + [ + Ok(ev_initialized(0)), + Ok(ev_finalized([1], [])), + Err(BackendError::Other("ended".to_owned())), + ] + }, + 3, + ); + + let _r = follow_unpin.next().await.unwrap().unwrap(); + + // Drop the initialized block: + let i0 = follow_unpin.next().await.unwrap().unwrap(); + drop(i0); + + // Let a finalization event occur. + let _f1 = follow_unpin.next().await.unwrap().unwrap(); + + // Now, initialized block should be unpinned. 
+ assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(0)]); + assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(0))); + } + + #[tokio::test] + async fn unpins_old_blocks() { + let (mut follow_unpin, unpin_rx) = test_unpin_stream_getter( + || { + [ + Ok(ev_initialized(0)), + Ok(ev_finalized([1], [])), + Ok(ev_finalized([2], [])), + Ok(ev_finalized([3], [])), + Ok(ev_finalized([4], [])), + Ok(ev_finalized([5], [])), + Err(BackendError::Other("ended".to_owned())), + ] + }, + 3, + ); + + let _r = follow_unpin.next().await.unwrap().unwrap(); + let _i0 = follow_unpin.next().await.unwrap().unwrap(); + unpin_rx.try_recv().expect_err("nothing unpinned yet"); + let _f1 = follow_unpin.next().await.unwrap().unwrap(); + unpin_rx.try_recv().expect_err("nothing unpinned yet"); + let _f2 = follow_unpin.next().await.unwrap().unwrap(); + unpin_rx.try_recv().expect_err("nothing unpinned yet"); + let _f3 = follow_unpin.next().await.unwrap().unwrap(); + + // Max age is 3, so after block 3 finalized, block 0 becomes too old and is unpinned. + assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(0)]); + + let _f4 = follow_unpin.next().await.unwrap().unwrap(); + + // Block 1 is now too old and is unpinned. + assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(1)]); + + let _f5 = follow_unpin.next().await.unwrap().unwrap(); + + // Block 2 is now too old and is unpinned. 
+ assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(2)]); + } + + #[tokio::test] + async fn dropped_new_blocks_should_not_get_unpinned_until_finalization() { + let (mut follow_unpin, unpin_rx) = test_unpin_stream_getter( + || { + [ + Ok(ev_initialized(0)), + Ok(ev_new_block(0, 1)), + Ok(ev_new_block(1, 2)), + Ok(ev_finalized([1], [])), + Ok(ev_finalized([2], [])), + Err(BackendError::Other("ended".to_owned())), + ] + }, + 10, + ); + + let _r = follow_unpin.next().await.unwrap().unwrap(); + let _i0 = follow_unpin.next().await.unwrap().unwrap(); + + let n1 = follow_unpin.next().await.unwrap().unwrap(); + drop(n1); + let n2 = follow_unpin.next().await.unwrap().unwrap(); + drop(n2); + + // New blocks dropped but still pinned: + assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(1))); + assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(2))); + + let f1 = follow_unpin.next().await.unwrap().unwrap(); + drop(f1); + + // After block 1 finalized, both blocks are still pinned because: + // - block 1 was handed back in the finalized event, so will be unpinned next time. + // - block 2 wasn't mentioned in the finalized event, so should not have been unpinned yet. + assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(1))); + assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(2))); + + let f2 = follow_unpin.next().await.unwrap().unwrap(); + drop(f2); + + // After block 2 finalized, block 1 can be unpinned finally, but block 2 needs to wait one more event. 
+ assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(1))); + assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(2))); + assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(1)]); + } + + #[tokio::test] + async fn dropped_new_blocks_should_not_get_unpinned_until_pruned() { + let (mut follow_unpin, unpin_rx) = test_unpin_stream_getter( + || { + [ + Ok(ev_initialized(0)), + Ok(ev_new_block(0, 1)), + Ok(ev_new_block(1, 2)), + Ok(ev_new_block(1, 3)), + Ok(ev_finalized([1], [])), + Ok(ev_finalized([2], [3])), + Ok(ev_finalized([4], [])), + Err(BackendError::Other("ended".to_owned())), + ] + }, + 10, + ); + + let _r = follow_unpin.next().await.unwrap().unwrap(); + let _i0 = follow_unpin.next().await.unwrap().unwrap(); + + let n1 = follow_unpin.next().await.unwrap().unwrap(); + drop(n1); + let n2 = follow_unpin.next().await.unwrap().unwrap(); + drop(n2); + let n3 = follow_unpin.next().await.unwrap().unwrap(); + drop(n3); + + let f1 = follow_unpin.next().await.unwrap().unwrap(); + drop(f1); + + // After block 1 is finalized, everything is still pinned because the finalization event + // itself returns 1, and 2/3 aren't finalized or pruned yet. + assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(1))); + assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(2))); + assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(3))); + + let f2 = follow_unpin.next().await.unwrap().unwrap(); + drop(f2); + + // After the next finalization event, block 1 can finally be unpinned since it was Finalized + // last event _and_ is no longer handed back anywhere. 2 and 3 should still be pinned. 
+ assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(1))); + assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(2))); + assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(3))); + assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(1)]); + + let f4 = follow_unpin.next().await.unwrap().unwrap(); + drop(f4); + + // After some other finalized event, we are now allowed to ditch the previously pruned and + // finalized blocks 2 and 3. + assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(2))); + assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(3))); + assert_from_unpin_rx( + &unpin_rx, + [H256::from_low_u64_le(2), H256::from_low_u64_le(3)], + ); + } + + #[tokio::test] + async fn never_unpin_new_block_before_finalized() { + // Ensure that if we drop a new block; the pinning is still active until the block is finalized. + let (mut follow_unpin, unpin_rx) = test_unpin_stream_getter( + || { + [ + Ok(ev_initialized(0)), + Ok(ev_new_block(0, 1)), + Ok(ev_new_block(1, 2)), + Ok(ev_best_block(1)), + Ok(ev_finalized([1], [])), + Ok(ev_finalized([2], [])), + Err(BackendError::Other("ended".to_owned())), + ] + }, + 10, + ); + + let _r = follow_unpin.next().await.unwrap().unwrap(); + + // drop initialised block 0 and new block 1 and new block 2. + let i0 = follow_unpin.next().await.unwrap().unwrap(); + drop(i0); + let n1 = follow_unpin.next().await.unwrap().unwrap(); + drop(n1); + let n2 = follow_unpin.next().await.unwrap().unwrap(); + drop(n2); + let b1 = follow_unpin.next().await.unwrap().unwrap(); + drop(b1); + + // Nothing unpinned yet! + unpin_rx.try_recv().expect_err("nothing unpinned yet"); + + let f1 = follow_unpin.next().await.unwrap().unwrap(); + drop(f1); + + // After finalization, block 1 is now ready to be unpinned (it won't be seen again), + // but isn't actually unpinned yet (because it was just handed back in f1). Block 0 + // however has now been unpinned. 
+ assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(0))); + assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(0)]); + unpin_rx.try_recv().expect_err("nothing unpinned yet"); + + let f2 = follow_unpin.next().await.unwrap().unwrap(); + drop(f2); + + // After f2, we can get rid of block 1 now, which was finalized last time. + assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(1))); + assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(1)]); + unpin_rx.try_recv().expect_err("nothing unpinned yet"); + } +} diff --git a/new/src/backend/chain_head/storage_items.rs b/new/src/backend/chain_head/storage_items.rs new file mode 100644 index 0000000000..917beacda2 --- /dev/null +++ b/new/src/backend/chain_head/storage_items.rs @@ -0,0 +1,169 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use super::follow_stream_driver::FollowStreamDriverHandle; +use super::follow_stream_unpin::BlockRef; +use crate::config::{Config, HashFor, RpcConfigFor}; +use crate::error::{BackendError, RpcError}; +use futures::{FutureExt, Stream, StreamExt}; +use std::collections::VecDeque; +use std::future::Future; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; +use subxt_rpcs::methods::chain_head::{ + ChainHeadRpcMethods, FollowEvent, MethodResponse, StorageQuery, StorageResult, +}; + +/// Obtain a stream of storage items given some query. this handles continuing +/// and stopping under the hood, and returns a stream of `StorageResult`s. +pub struct StorageItems { + done: bool, + operation_id: Arc, + buffered_responses: VecDeque, + continue_call: ContinueFutGetter, + continue_fut: Option, + follow_event_stream: FollowEventStream>, +} + +impl StorageItems { + // Subscribe to follow events, and return a stream of storage results + // given some storage queries. The stream will automatically resume as + // needed, and stop when done. 
+ pub async fn from_methods( + queries: impl Iterator>, + at: HashFor, + follow_handle: &FollowStreamDriverHandle>, + methods: ChainHeadRpcMethods>, + ) -> Result { + let sub_id = super::get_subscription_id(follow_handle).await?; + + // Subscribe to events and make the initial request to get an operation ID. + let follow_events = follow_handle.subscribe().events(); + let status = methods + .chainhead_v1_storage(&sub_id, at, queries, None) + .await?; + let operation_id: Arc = match status { + MethodResponse::LimitReached => return Err(RpcError::LimitReached.into()), + MethodResponse::Started(s) => s.operation_id.into(), + }; + + // A function which returns the call to continue the subscription: + let continue_call: ContinueFutGetter = { + let operation_id = operation_id.clone(); + Box::new(move || { + let sub_id = sub_id.clone(); + let operation_id = operation_id.clone(); + let methods = methods.clone(); + + Box::pin(async move { + methods + .chainhead_v1_continue(&sub_id, &operation_id) + .await?; + Ok(()) + }) + }) + }; + + Ok(StorageItems::new( + operation_id, + continue_call, + Box::pin(follow_events), + )) + } + + fn new( + operation_id: Arc, + continue_call: ContinueFutGetter, + follow_event_stream: FollowEventStream>, + ) -> Self { + Self { + done: false, + buffered_responses: VecDeque::new(), + operation_id, + continue_call, + continue_fut: None, + follow_event_stream, + } + } +} + +pub type FollowEventStream = + Pin>> + Send + 'static>>; +pub type ContinueFutGetter = Box ContinueFut + Send + 'static>; +pub type ContinueFut = Pin> + Send + 'static>>; + +impl Stream for StorageItems { + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + loop { + if self.done { + return Poll::Ready(None); + } + + if let Some(item) = self.buffered_responses.pop_front() { + return Poll::Ready(Some(Ok(item))); + } + + if let Some(mut fut) = self.continue_fut.take() { + match fut.poll_unpin(cx) { + Poll::Pending => { + 
self.continue_fut = Some(fut); + return Poll::Pending; + } + Poll::Ready(Err(e)) => { + if e.is_disconnected_will_reconnect() { + self.continue_fut = Some((self.continue_call)()); + continue; + } + + self.done = true; + return Poll::Ready(Some(Err(e))); + } + Poll::Ready(Ok(())) => { + // Finished; carry on. + } + } + } + + let ev = match self.follow_event_stream.poll_next_unpin(cx) { + Poll::Pending => return Poll::Pending, + Poll::Ready(None) => return Poll::Ready(None), + Poll::Ready(Some(ev)) => ev, + }; + + match ev { + FollowEvent::OperationWaitingForContinue(id) + if id.operation_id == *self.operation_id => + { + // Start a call to ask for more events + self.continue_fut = Some((self.continue_call)()); + continue; + } + FollowEvent::OperationStorageDone(id) if id.operation_id == *self.operation_id => { + // We're finished! + self.done = true; + return Poll::Ready(None); + } + FollowEvent::OperationStorageItems(items) + if items.operation_id == *self.operation_id => + { + // We have items; buffer them to emit next loops. + self.buffered_responses = items.items; + continue; + } + FollowEvent::OperationError(err) if err.operation_id == *self.operation_id => { + // Something went wrong obtaining storage items; mark as done and return the error. + self.done = true; + return Poll::Ready(Some(Err(BackendError::Other(err.error)))); + } + _ => { + // We don't care about this event; wait for the next. + continue; + } + } + } + } +} diff --git a/new/src/backend/combined.rs b/new/src/backend/combined.rs new file mode 100644 index 0000000000..e69de29bb2 diff --git a/new/src/backend/legacy.rs b/new/src/backend/legacy.rs new file mode 100644 index 0000000000..75c62eee17 --- /dev/null +++ b/new/src/backend/legacy.rs @@ -0,0 +1,428 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This module exposes a legacy backend implementation, which relies +//! 
on the legacy RPC API methods. + +mod descendant_streams; + +use self::rpc_methods::TransactionStatus as RpcTransactionStatus; +use crate::backend::utils::{retry, retry_stream}; +use crate::backend::{ + Backend, BlockRef, StorageResponse, StreamOf, StreamOfResults, + TransactionStatus, +}; +use crate::config::{Config, HashFor, Hasher, Header, RpcConfigFor}; +use crate::error::BackendError; +use async_trait::async_trait; +use futures::TryStreamExt; +use futures::{Future, Stream, StreamExt, future, future::Either, stream}; +use subxt_rpcs::RpcClient; +use codec::Encode; +use descendant_streams::{StorageFetchDescendantKeysStream, StorageFetchDescendantValuesStream}; + +/// Re-export legacy RPC types and methods from [`subxt_rpcs::methods::legacy`]. +pub mod rpc_methods { + pub use subxt_rpcs::methods::legacy::*; +} + +// Expose the RPC methods. +pub use rpc_methods::LegacyRpcMethods; + +/// Configure and build an [`LegacyBackend`]. +pub struct LegacyBackendBuilder { + storage_page_size: u32, + _marker: std::marker::PhantomData, +} + +impl Default for LegacyBackendBuilder { + fn default() -> Self { + Self::new() + } +} + +impl LegacyBackendBuilder { + /// Create a new [`LegacyBackendBuilder`]. + pub fn new() -> Self { + Self { + storage_page_size: 64, + _marker: std::marker::PhantomData, + } + } + + /// Iterating over storage entries using the [`LegacyBackend`] requires + /// fetching entries in batches. This configures the number of entries that + /// we'll try to obtain in each batch (default: 64). + pub fn storage_page_size(mut self, storage_page_size: u32) -> Self { + self.storage_page_size = storage_page_size; + self + } + + /// Given an [`RpcClient`] to use to make requests, this returns a [`LegacyBackend`], + /// which implements the [`Backend`] trait. + pub fn build(self, client: impl Into) -> LegacyBackend { + LegacyBackend { + storage_page_size: self.storage_page_size, + methods: LegacyRpcMethods::new(client.into()), + } + } +} + +/// The legacy backend. 
+#[derive(Debug)] +pub struct LegacyBackend { + storage_page_size: u32, + methods: LegacyRpcMethods>, +} + +impl Clone for LegacyBackend { + fn clone(&self) -> LegacyBackend { + LegacyBackend { + storage_page_size: self.storage_page_size, + methods: self.methods.clone(), + } + } +} + +impl LegacyBackend { + /// Configure and construct an [`LegacyBackend`]. + pub fn builder() -> LegacyBackendBuilder { + LegacyBackendBuilder::new() + } +} + +impl super::sealed::Sealed for LegacyBackend {} + +#[async_trait] +impl Backend for LegacyBackend { + async fn storage_fetch_values( + &self, + keys: Vec>, + at: HashFor, + ) -> Result, BackendError> { + fn get_entry( + key: Vec, + at: HashFor, + methods: LegacyRpcMethods>, + ) -> impl Future, BackendError>> { + retry(move || { + let methods = methods.clone(); + let key = key.clone(); + async move { + let res = methods.state_get_storage(&key, Some(at)).await?; + Ok(res.map(move |value| StorageResponse { key, value })) + } + }) + } + + let keys = keys.clone(); + let methods = self.methods.clone(); + + // For each key, return it + a future to get the result. + let iter = keys + .into_iter() + .map(move |key| get_entry(key, at, methods.clone())); + + let s = stream::iter(iter) + // Resolve the future + .then(|fut| fut) + // Filter any Options out (ie if we didn't find a value at some key we return nothing for it). 
+ .filter_map(|r| future::ready(r.transpose())); + + Ok(StreamOf(Box::pin(s))) + } + + async fn storage_fetch_descendant_keys( + &self, + key: Vec, + at: HashFor, + ) -> Result>, BackendError> { + let keys = StorageFetchDescendantKeysStream::new( + self.methods.clone(), + key, + at, + self.storage_page_size + ); + + let keys = keys.flat_map(|keys| { + match keys { + Err(e) => { + // If there's an error, return that next: + Either::Left(stream::iter(std::iter::once(Err(e)))) + } + Ok(keys) => { + // Or, stream each "ok" value: + Either::Right(stream::iter(keys.into_iter().map(Ok))) + } + } + }); + + Ok(StreamOf(Box::pin(keys))) + } + + async fn storage_fetch_descendant_values( + &self, + key: Vec, + at: HashFor, + ) -> Result, BackendError> { + let values_stream = StorageFetchDescendantValuesStream::new( + self.methods.clone(), + key, + at, + self.storage_page_size + ); + + Ok(StreamOf(Box::pin(values_stream))) + } + + async fn genesis_hash(&self) -> Result, BackendError> { + retry(|| async { + let hash = self.methods.genesis_hash().await?; + Ok(hash) + }) + .await + } + + async fn block_header(&self, at: HashFor) -> Result, BackendError> { + retry(|| async { + let header = self.methods.chain_get_header(Some(at)).await?; + Ok(header) + }) + .await + } + + async fn block_body(&self, at: HashFor) -> Result>>, BackendError> { + retry(|| async { + let Some(details) = self.methods.chain_get_block(Some(at)).await? 
else { + return Ok(None); + }; + Ok(Some( + details.block.extrinsics.into_iter().map(|b| b.0).collect(), + )) + }) + .await + } + + async fn latest_finalized_block_ref(&self) -> Result>, BackendError> { + retry(|| async { + let hash = self.methods.chain_get_finalized_head().await?; + Ok(BlockRef::from_hash(hash)) + }) + .await + } + + async fn stream_all_block_headers( + &self, + hasher: T::Hasher, + ) -> Result>)>, BackendError> { + let methods = self.methods.clone(); + let retry_sub = retry_stream(move || { + let methods = methods.clone(); + let hasher = hasher.clone(); + Box::pin(async move { + let sub = methods.chain_subscribe_all_heads().await?; + let sub = sub.map_err(|e| e.into()).map(move |r| { + r.map(|h| { + let hash = hasher.hash(&h.encode()); + (h, BlockRef::from_hash(hash)) + }) + }); + Ok(StreamOf(Box::pin(sub))) + }) + }) + .await?; + + Ok(retry_sub) + } + + async fn stream_best_block_headers( + &self, + hasher: T::Hasher, + ) -> Result>)>, BackendError> { + let methods = self.methods.clone(); + + let retry_sub = retry_stream(move || { + let methods = methods.clone(); + let hasher = hasher.clone(); + Box::pin(async move { + let sub = methods.chain_subscribe_new_heads().await?; + let sub = sub.map_err(|e| e.into()).map(move |r| { + r.map(|h| { + let hash = hasher.hash(&h.encode()); + (h, BlockRef::from_hash(hash)) + }) + }); + Ok(StreamOf(Box::pin(sub))) + }) + }) + .await?; + + Ok(retry_sub) + } + + async fn stream_finalized_block_headers( + &self, + hasher: T::Hasher, + ) -> Result>)>, BackendError> { + let this = self.clone(); + + let retry_sub = retry_stream(move || { + let this = this.clone(); + let hasher = hasher.clone(); + Box::pin(async move { + let sub = this.methods.chain_subscribe_finalized_heads().await?; + + // Get the last finalized block immediately so that the stream will emit every finalized block after this. 
+ let last_finalized_block_ref = this.latest_finalized_block_ref().await?; + let last_finalized_block_num = this + .block_header(last_finalized_block_ref.hash()) + .await? + .map(|h| h.number().into()); + + // Fill in any missing blocks, because the backend may not emit every finalized block; just the latest ones which + // are finalized each time. + let sub = subscribe_to_block_headers_filling_in_gaps( + this.methods.clone(), + sub, + last_finalized_block_num, + ); + let sub = sub.map(move |r| { + r.map(|h| { + let hash = hasher.hash(&h.encode()); + (h, BlockRef::from_hash(hash)) + }) + }); + + Ok(StreamOf(Box::pin(sub))) + }) + }) + .await?; + + Ok(retry_sub) + } + + async fn submit_transaction( + &self, + extrinsic: &[u8], + ) -> Result>>, BackendError> { + let sub = self + .methods + .author_submit_and_watch_extrinsic(extrinsic) + .await?; + + let sub = sub.filter_map(|r| { + let mapped = r + .map_err(|e| e.into()) + .map(|tx| { + match tx { + // We ignore these because they don't map nicely to the new API. They don't signal "end states" so this should be fine. 
+ RpcTransactionStatus::Future => None, + RpcTransactionStatus::Retracted(_) => None, + // These roughly map across: + RpcTransactionStatus::Ready => Some(TransactionStatus::Validated), + RpcTransactionStatus::Broadcast(_peers) => { + Some(TransactionStatus::Broadcasted) + } + RpcTransactionStatus::InBlock(hash) => { + Some(TransactionStatus::InBestBlock { + hash: BlockRef::from_hash(hash), + }) + } + // These 5 mean that the stream will very likely end: + RpcTransactionStatus::FinalityTimeout(_) => { + Some(TransactionStatus::Dropped { + message: "Finality timeout".into(), + }) + } + RpcTransactionStatus::Finalized(hash) => { + Some(TransactionStatus::InFinalizedBlock { + hash: BlockRef::from_hash(hash), + }) + } + RpcTransactionStatus::Usurped(_) => Some(TransactionStatus::Invalid { + message: "Transaction was usurped by another with the same nonce" + .into(), + }), + RpcTransactionStatus::Dropped => Some(TransactionStatus::Dropped { + message: "Transaction was dropped".into(), + }), + RpcTransactionStatus::Invalid => Some(TransactionStatus::Invalid { + message: + "Transaction is invalid (eg because of a bad nonce, signature etc)" + .into(), + }), + } + }) + .transpose(); + + future::ready(mapped) + }); + + Ok(StreamOf::new(Box::pin(sub))) + } + + async fn call( + &self, + method: &str, + call_parameters: Option<&[u8]>, + at: HashFor, + ) -> Result, BackendError> { + retry(|| async { + let res = self + .methods + .state_call(method, call_parameters, Some(at)) + .await?; + Ok(res) + }) + .await + } +} + +/// Note: This is exposed for testing but is not considered stable and may change +/// without notice in a patch release. +#[doc(hidden)] +pub fn subscribe_to_block_headers_filling_in_gaps( + methods: LegacyRpcMethods>, + sub: S, + mut last_block_num: Option, +) -> impl Stream> + Send +where + T: Config, + S: Stream> + Send, + E: Into + Send + 'static, +{ + sub.flat_map(move |s| { + // Get the header, or return a stream containing just the error. 
+ let header = match s { + Ok(header) => header, + Err(e) => return Either::Left(stream::once(async { Err(e.into()) })), + }; + + // We want all previous details up to, but not including this current block num. + let end_block_num = header.number().into(); + + // This is one after the last block we returned details for last time. + let start_block_num = last_block_num.map(|n| n + 1).unwrap_or(end_block_num); + + // Iterate over all of the previous blocks we need headers for, ignoring the current block + // (which we already have the header info for): + let methods = methods.clone(); + let previous_headers = stream::iter(start_block_num..end_block_num) + .then(move |n| { + let methods = methods.clone(); + async move { + let hash = methods.chain_get_block_hash(Some(n.into())).await?; + let header = methods.chain_get_header(hash).await?; + Ok::<_, BackendError>(header) + } + }) + .filter_map(async |h| h.transpose()); + + // On the next iteration, we'll get details starting just after this end block. + last_block_num = Some(end_block_num); + + // Return a combination of any previous headers plus the new header. + Either::Right(previous_headers.chain(stream::once(async { Ok(header) }))) + }) +} diff --git a/new/src/backend/legacy/descendant_streams.rs b/new/src/backend/legacy/descendant_streams.rs new file mode 100644 index 0000000000..dab5e48508 --- /dev/null +++ b/new/src/backend/legacy/descendant_streams.rs @@ -0,0 +1,256 @@ +use crate::backend::utils::retry; +use crate::backend::StorageResponse; +use crate::config::{Config, HashFor, RpcConfigFor}; +use crate::error::BackendError; +use futures::{Future, FutureExt, Stream, StreamExt}; +use std::collections::VecDeque; +use std::pin::Pin; +use std::task::{Context, Poll}; +use super::LegacyRpcMethods; + +/// This provides a stream of values given some prefix `key`. It +/// internally manages pagination and such. 
+#[allow(clippy::type_complexity)] +pub struct StorageFetchDescendantKeysStream { + methods: LegacyRpcMethods>, + key: Vec, + at: HashFor, + // How many entries to ask for each time. + storage_page_size: u32, + // What key do we start paginating from? None = from the beginning. + pagination_start_key: Option>, + // Keys, future and cached: + keys_fut: + Option>, BackendError>> + Send + 'static>>>, + // Set to true when we're done: + done: bool, +} + +impl StorageFetchDescendantKeysStream { + /// Fetch descendant keys. + pub fn new( + methods: LegacyRpcMethods>, + key: Vec, + at: HashFor, + storage_page_size: u32, + ) -> Self { + StorageFetchDescendantKeysStream { + methods, + key, + at, + storage_page_size, + pagination_start_key: None, + keys_fut: None, + done: false, + } + } +} + +impl std::marker::Unpin for StorageFetchDescendantKeysStream {} + +impl Stream for StorageFetchDescendantKeysStream { + type Item = Result>, BackendError>; + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut this = self.as_mut(); + loop { + // We're already done. + if this.done { + return Poll::Ready(None); + } + + // Poll future to fetch next keys. + if let Some(mut keys_fut) = this.keys_fut.take() { + match keys_fut.poll_unpin(cx) { + Poll::Ready(Ok(mut keys)) => { + if this.pagination_start_key.is_some() + && keys.first() == this.pagination_start_key.as_ref() + { + // Currently, Smoldot returns the "start key" as the first key in the input + // (see https://github.com/smol-dot/smoldot/issues/1692), whereas Substrate doesn't. + // We don't expect the start key to be returned either (since it was the last key of prev + // iteration), so remove it if we see it. This `remove()` method isn't very efficient but + // this will be a non issue with the RPC V2 APIs or if Smoldot aligns with Substrate anyway. + keys.remove(0); + } + if keys.is_empty() { + // No keys left; we're done! 
+ this.done = true; + return Poll::Ready(None); + } + // The last key is where we want to paginate from next time. + this.pagination_start_key = keys.last().cloned(); + // return all of the keys from this run. + return Poll::Ready(Some(Ok(keys))); + } + Poll::Ready(Err(e)) => { + if e.is_disconnected_will_reconnect() { + // Loop around and try again. No more keys_fut as it was taken, + // so we'll ask for the keys again from the last good pagination_start_key. + continue; + } + + // Error getting keys? Return it. + return Poll::Ready(Some(Err(e))); + }, + Poll::Pending => { + this.keys_fut = Some(keys_fut); + return Poll::Pending; + } + } + } + + // Else, we don't have a fut to get keys yet so start one going. + let methods = this.methods.clone(); + let key = this.key.clone(); + let at = this.at; + let storage_page_size = this.storage_page_size; + let pagination_start_key = this.pagination_start_key.clone(); + let keys_fut = async move { + let keys = methods + .state_get_keys_paged( + &key, + storage_page_size, + pagination_start_key.as_deref(), + Some(at), + ) + .await?; + Ok(keys) + }; + this.keys_fut = Some(Box::pin(keys_fut)); + } + } +} + +/// This provides a stream of values given some stream of keys. +#[allow(clippy::type_complexity)] +pub struct StorageFetchDescendantValuesStream { + // Stream of keys. + keys_stream: StorageFetchDescendantKeysStream, + // Keys back from the stream which we are currently trying to fetch results for: + keys: Vec>, + // A future which will resolve to the resulting values: + results_fut: Option< + Pin< + Box< + dyn Future, Vec)>>, BackendError>> + + Send + + 'static, + >, + >, + >, + // Once we get values back we put them here and hand them back one by one to the caller. + results: VecDeque<(Vec, Vec)>, +} + +impl StorageFetchDescendantValuesStream { + /// Fetch descendant values. 
+ pub fn new( + methods: LegacyRpcMethods>, + key: Vec, + at: HashFor, + storage_page_size: u32, + ) -> Self { + StorageFetchDescendantValuesStream { + keys_stream: StorageFetchDescendantKeysStream { + methods, + key, + at, + storage_page_size, + pagination_start_key: None, + keys_fut: None, + done: false, + }, + keys: Default::default(), + results_fut: None, + results: Default::default(), + } + } +} + +impl Stream for StorageFetchDescendantValuesStream { + type Item = Result; + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut this = self.as_mut(); + + loop { + // If we have results back, return them one by one + if let Some((key, value)) = this.results.pop_front() { + let res = StorageResponse { key, value }; + return Poll::Ready(Some(Ok(res))); + } + + // If we're waiting on the next results then poll that future: + if let Some(mut results_fut) = this.results_fut.take() { + match results_fut.poll_unpin(cx) { + Poll::Ready(Ok(Some(results))) => { + // Clear keys once result comes back. + this.keys = Vec::new(); + this.results = results; + continue; + } + Poll::Ready(Ok(None)) => { + // Clear keys once result comes back. + this.keys = Vec::new(); + // But no results back for these keys we we just skip them. + continue; + } + Poll::Ready(Err(e)) => { + if e.is_disconnected_will_reconnect() { + // Don't replace the `results_fut` since we got disconnected, and loop around. + // This will cause us to try re-fetching results for the current keys. + continue; + } + + return Poll::Ready(Some(Err(e))) + } + Poll::Pending => { + this.results_fut = Some(results_fut); + return Poll::Pending; + } + } + } + + // If we have keys ready to fetch results for, then line up a results future to get them. + // The keys stream handles disconnections internally for us. 
+ if !this.keys.is_empty() { + let methods = this.keys_stream.methods.clone(); + let at = this.keys_stream.at; + let keys = this.keys.clone(); + let results_fut = async move { + let keys = keys.iter().map(|k| &**k); + let values = retry(|| async { + let res = methods + .state_query_storage_at(keys.clone(), Some(at)) + .await?; + Ok(res) + }) + .await?; + let values: VecDeque<_> = values + .into_iter() + .flat_map(|v| { + v.changes.into_iter().filter_map(|(k, v)| { + let v = v?; + Some((k.0, v.0)) + }) + }) + .collect(); + Ok(Some(values)) + }; + + this.results_fut = Some(Box::pin(results_fut)); + continue; + } + + // We have no keys yet so wait for those first. + match this.keys_stream.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(keys))) => { + this.keys = keys; + continue; + } + Poll::Ready(Some(Err(e))) => return Poll::Ready(Some(Err(e))), + Poll::Ready(None) => return Poll::Ready(None), + Poll::Pending => return Poll::Pending, + } + } + } +} diff --git a/new/src/backend/utils.rs b/new/src/backend/utils.rs new file mode 100644 index 0000000000..54f6ba669f --- /dev/null +++ b/new/src/backend/utils.rs @@ -0,0 +1,273 @@ +//! RPC utils. + +use super::{StreamOf, StreamOfResults}; +use crate::error::BackendError; +use futures::{FutureExt, Stream, StreamExt}; +use std::{future::Future, pin::Pin, task::Poll}; + +/// Resubscribe callback. +type ResubscribeGetter = Box ResubscribeFuture + Send>; + +/// Future that resolves to a subscription stream. +type ResubscribeFuture = + Pin, BackendError>> + Send>>; + +/// Retry subscription. 
+struct RetrySubscription { + resubscribe: F, + state: RetrySubscriptionState, +} + +enum RetrySubscriptionState { + Init, + Pending(R), + Stream(StreamOfResults), + Done, +} + +impl std::marker::Unpin for RetrySubscription {} + +impl Stream for RetrySubscription +where + F: FnMut() -> R, + R: Future, BackendError>> + Unpin, +{ + type Item = Result; + + fn poll_next( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> Poll> { + loop { + match &mut self.state { + RetrySubscriptionState::Init => { + self.state = RetrySubscriptionState::Pending((self.resubscribe)()); + }, + RetrySubscriptionState::Stream(s) => match s.poll_next_unpin(cx) { + Poll::Ready(Some(Err(err))) => { + if err.is_disconnected_will_reconnect() { + self.state = RetrySubscriptionState::Init; + } + return Poll::Ready(Some(Err(err))); + } + Poll::Ready(None) => { + return Poll::Ready(None) + } + Poll::Ready(Some(Ok(val))) => { + return Poll::Ready(Some(Ok(val))); + } + Poll::Pending => { + return Poll::Pending; + } + }, + RetrySubscriptionState::Pending(fut) => match fut.poll_unpin(cx) { + Poll::Ready(Err(err)) => { + if err.is_disconnected_will_reconnect() { + self.state = RetrySubscriptionState::Init; + } + return Poll::Ready(Some(Err(err))); + } + Poll::Ready(Ok(stream)) => { + self.state = RetrySubscriptionState::Stream(stream); + continue; + } + Poll::Pending => { + return Poll::Pending; + } + }, + RetrySubscriptionState::Done => { + return Poll::Ready(None) + } + }; + } + } +} + +/// Retry a future until it doesn't return a disconnected error. 
+/// +/// # Example +/// +/// ```rust,no_run,standalone_crate +/// use subxt::backend::utils::retry; +/// +/// async fn some_future() -> Result<(), subxt::error::BackendError> { +/// Ok(()) +/// } +/// +/// #[tokio::main] +/// async fn main() { +/// let result = retry(|| some_future()).await; +/// } +/// ``` +pub async fn retry(mut retry_future: F) -> Result +where + F: FnMut() -> T, + T: Future>, +{ + const REJECTED_MAX_RETRIES: usize = 10; + let mut rejected_retries = 0; + + loop { + match retry_future().await { + Ok(v) => return Ok(v), + Err(e) => { + if e.is_disconnected_will_reconnect() { + continue; + } + + // TODO: https://github.com/paritytech/subxt/issues/1567 + // This is a hack because, in the event of a disconnection, + // we may not get the correct subscription ID back on reconnecting. + // + // This is because we have a race between this future and the + // separate chainHead subscription, which runs in a different task. + // if this future is too quick, it'll be given back an old + // subscription ID from the chainHead subscription which has yet + // to reconnect and establish a new subscription ID. + // + // In the event of a wrong subscription Id being used, we happen to + // hand back an `RpcError::LimitReached`, and so can retry when we + // specifically hit that error to see if we get a new subscription ID + // eventually. + if e.is_rpc_limit_reached() && rejected_retries < REJECTED_MAX_RETRIES { + rejected_retries += 1; + continue; + } + + return Err(e); + } + } + } +} + +/// Create a retry stream that will resubscribe on disconnect. +/// +/// It's important to note that this function is intended to work only for stateless subscriptions. +/// If the subscription takes input or modifies state, this function should not be used. 
+/// +/// # Example +/// +/// ```rust,no_run,standalone_crate +/// use subxt::backend::{utils::retry_stream, StreamOf}; +/// use futures::future::FutureExt; +/// +/// #[tokio::main] +/// async fn main() { +/// retry_stream(|| { +/// // This needs to return a stream of results but if you are using +/// // the subxt backend already it will return StreamOf so you can just +/// // return it directly in the async block below. +/// async move { Ok(StreamOf::new(Box::pin(futures::stream::iter([Ok(2)])))) }.boxed() +/// }).await; +/// } +/// ``` +pub async fn retry_stream(get_stream: F) -> Result, BackendError> +where + F: Clone + Send + 'static + FnMut() -> Fut, + Fut: Future, BackendError>> + Send, + R: Send + 'static, +{ + // This returns the stream. On disconnect this is called again. + let get_stream_with_retry = move || { + let get_stream = get_stream.clone(); + async move { retry(get_stream).await }.boxed() + }; + + // The extra Box is to encapsulate the retry subscription type + Ok(StreamOf::new(Box::pin(RetrySubscription { + state: RetrySubscriptionState::Init, + resubscribe: get_stream_with_retry, + }))) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::backend::StreamOf; + + fn disconnect_err() -> BackendError { + BackendError::Rpc(subxt_rpcs::Error::DisconnectedWillReconnect(String::new()).into()) + } + + fn custom_err() -> BackendError { + BackendError::Other(String::new()) + } + + #[tokio::test] + async fn retry_stream_works() { + let retry_stream = retry_stream(|| { + async { + Ok(StreamOf::new(Box::pin(futures::stream::iter([ + Ok(1), + Ok(2), + Ok(3), + Err(disconnect_err()), + ])))) + } + .boxed() + }) + .await + .unwrap(); + + let result = retry_stream + .take(5) + .collect::>>() + .await; + + assert!(matches!(result[0], Ok(r) if r == 1)); + assert!(matches!(result[1], Ok(r) if r == 2)); + assert!(matches!(result[2], Ok(r) if r == 3)); + assert!(matches!(result[3], Err(ref e) if e.is_disconnected_will_reconnect())); + 
assert!(matches!(result[4], Ok(r) if r == 1)); + } + + #[tokio::test] + async fn retry_sub_works() { + let stream = futures::stream::iter([Ok(1), Err(disconnect_err())]); + + let resubscribe = Box::new(move || { + async move { Ok(StreamOf::new(Box::pin(futures::stream::iter([Ok(2)])))) }.boxed() + }); + + let retry_stream = RetrySubscription { + state: RetrySubscriptionState::Stream(StreamOf::new(Box::pin(stream))), + resubscribe, + }; + + let result: Vec<_> = retry_stream.collect().await; + + assert!(matches!(result[0], Ok(r) if r == 1)); + assert!(matches!(result[1], Err(ref e) if e.is_disconnected_will_reconnect())); + assert!(matches!(result[2], Ok(r) if r == 2)); + } + + #[tokio::test] + async fn retry_sub_err_terminates_stream() { + let stream = futures::stream::iter([Ok(1)]); + let resubscribe = Box::new(|| async move { Err(custom_err()) }.boxed()); + + let retry_stream = RetrySubscription { + state: RetrySubscriptionState::Stream(StreamOf::new(Box::pin(stream))), + resubscribe, + }; + + assert_eq!(retry_stream.count().await, 1); + } + + #[tokio::test] + async fn retry_sub_resubscribe_err() { + let stream = futures::stream::iter([Ok(1), Err(disconnect_err())]); + let resubscribe = Box::new(|| async move { Err(custom_err()) }.boxed()); + + let retry_stream = RetrySubscription { + state: RetrySubscriptionState::Stream(StreamOf::new(Box::pin(stream))), + resubscribe, + }; + + let result: Vec<_> = retry_stream.collect().await; + + assert!(matches!(result[0], Ok(r) if r == 1)); + assert!(matches!(result[1], Err(ref e) if e.is_disconnected_will_reconnect())); + assert!(matches!(result[2], Err(ref e) if matches!(e, BackendError::Other(_)))); + } +} diff --git a/new/src/client.rs b/new/src/client.rs index 824c2cbc28..d2e347249c 100644 --- a/new/src/client.rs +++ b/new/src/client.rs @@ -1,2 +1,29 @@ +mod offline_client; mod online_client; -mod offline_client; \ No newline at end of file + +use core::marker::PhantomData; + +// We keep these traits internal, so that we 
can mess with them later if needed, +// and instead only the concrete types are public which wrap these trait impls. +pub(crate) use offline_client::OfflineClientAtBlockT; +pub(crate) use online_client::OnlineClientAtBlockT; + +pub use offline_client::OfflineClient; +pub use online_client::OnlineClient; + +/// This represents a client at a specific block number. +#[derive(Clone, Debug)] +pub struct ClientAtBlock { + client: Client, + marker: PhantomData, +} + +impl ClientAtBlock { + /// Construct a new client at some block. + pub(crate) fn new(client: Client) -> Self { + Self { + client, + marker: PhantomData, + } + } +} \ No newline at end of file diff --git a/new/src/client/offline_client.rs b/new/src/client/offline_client.rs index e69de29bb2..1ac0a41a11 100644 --- a/new/src/client/offline_client.rs +++ b/new/src/client/offline_client.rs @@ -0,0 +1,58 @@ +use crate::config::Config; +use crate::client::ClientAtBlock; +use crate::error::OfflineClientAtBlockError; +use subxt_metadata::Metadata; +use std::sync::Arc; + +#[derive(Clone, Debug)] +pub struct OfflineClient { + /// The configuration for this client. + config: T, +} + +impl OfflineClient { + /// Create a new [`OfflineClient`] with the given configuration. + pub fn new(config: T) -> Self { + OfflineClient { + config, + } + } + + /// Pick the block height at which to operate. This references data from the + /// [`OfflineClient`] it's called on, and so cannot outlive it. 
+ pub fn at( + &self, + block_number: u32, + ) -> Result, OfflineClientAtBlockError> { + let spec_version = self + .config + .spec_version_for_block_number(block_number) + .ok_or(OfflineClientAtBlockError::SpecVersionNotFound { block_number })?; + + let metadata = self + .config + .metadata_for_spec_version(spec_version) + .ok_or(OfflineClientAtBlockError::MetadataNotFound { spec_version })?; + + Ok(ClientAtBlock::new(OfflineClientAtBlock { + metadata, + })) + } +} + +pub struct OfflineClientAtBlock { + metadata: Arc, +} + +/// This represents an offline-only client at a specific block. +#[doc(hidden)] +pub trait OfflineClientAtBlockT { + /// Get the metadata appropriate for this block. + fn metadata(&self) -> &Metadata; +} + +impl OfflineClientAtBlockT for OfflineClientAtBlock { + fn metadata(&self) -> &Metadata { + &self.metadata + } +} \ No newline at end of file diff --git a/new/src/client/online_client.rs b/new/src/client/online_client.rs index e69de29bb2..e2afa6c0b3 100644 --- a/new/src/client/online_client.rs +++ b/new/src/client/online_client.rs @@ -0,0 +1,337 @@ +use super::ClientAtBlock; +use super::OfflineClientAtBlockT; +use crate::config::{ Config, HashFor, RpcConfigFor }; +use crate::error::OnlineClientAtBlockError; +use crate::backend::Backend; +use codec::{Compact, Decode, Encode}; +use frame_metadata::{RuntimeMetadata, RuntimeMetadataPrefixed}; +use scale_info_legacy::TypeRegistrySet; +use std::sync::Arc; +use subxt_rpcs::methods::chain_head::ArchiveCallResult; +use subxt_rpcs::{ChainHeadRpcMethods, RpcClient}; +use subxt_metadata::Metadata; + +#[cfg(feature = "jsonrpsee")] +#[cfg_attr(docsrs, doc(cfg(feature = "jsonrpsee")))] +use crate::error::OnlineClientError; + +/// A client which exposes the means to decode historic data on a chain online. +#[derive(Clone, Debug)] +pub struct OnlineClient { + inner: Arc>, +} + +struct OnlineClientInner { + /// The configuration for this client. 
+ config: T, + /// The RPC methods used to communicate with the node. + backend: Arc>, +} + +impl std::fmt::Debug for OnlineClientInner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("OnlineClientInner") + .field("config", &"") + .field("backend", &"Arc") + .finish() + } +} + +impl OnlineClient { + /// Construct a new [`OnlineClient`] using default settings which + /// point to a locally running node on `ws://127.0.0.1:9944`. + /// + /// **Note:** This will only work if the local node is an archive node. + #[cfg(feature = "jsonrpsee")] + pub async fn new(config: T) -> Result, OnlineClientError> { + let url = "ws://127.0.0.1:9944"; + OnlineClient::from_url(config, url).await + } + + /// Construct a new [`OnlineClient`], providing a URL to connect to. + #[cfg(feature = "jsonrpsee")] + pub async fn from_url( + config: T, + url: impl AsRef, + ) -> Result, OnlineClientError> { + let url_str = url.as_ref(); + let url = url::Url::parse(url_str).map_err(|_| OnlineClientError::InvalidUrl { + url: url_str.to_string(), + })?; + if !Self::is_url_secure(&url) { + return Err(OnlineClientError::RpcError( + subxt_rpcs::Error::InsecureUrl(url_str.to_string()), + )); + } + OnlineClient::from_insecure_url(config, url).await + } + + /// Construct a new [`OnlineClient`], providing a URL to connect to. + /// + /// Allows insecure URLs without SSL encryption, e.g. (http:// and ws:// URLs). 
+ #[cfg(feature = "jsonrpsee")] + pub async fn from_insecure_url( + config: T, + url: impl AsRef, + ) -> Result, OnlineClientError> { + let rpc_client = RpcClient::from_insecure_url(url).await?; + Ok(OnlineClient::from_rpc_client(config, rpc_client)) + } + + fn is_url_secure(url: &url::Url) -> bool { + let secure_scheme = url.scheme() == "https" || url.scheme() == "wss"; + let is_localhost = url.host().is_some_and(|e| match e { + url::Host::Domain(e) => e == "localhost", + url::Host::Ipv4(e) => e.is_loopback(), + url::Host::Ipv6(e) => e.is_loopback(), + }); + secure_scheme || is_localhost + } + + /// Construct a new [`OnlineClient`] by providing an [`RpcClient`] to drive the connection. + /// This will use the current default [`Backend`], which may change in future releases. + #[cfg(feature = "jsonrpsee")] + pub fn from_rpc_client( + config: T, + rpc_client: impl Into, + ) -> OnlineClient { + let rpc_client = rpc_client.into(); + let backend = Arc::new(LegacyBackend::builder().build(rpc_client)); + OnlineClient::from_backend(config, backend) + } + + /// Construct a new [`OnlineClient`] by providing an underlying [`Backend`] + /// implementation to power it. + pub fn from_backend>( + config: T, + backend: impl Into>>, + ) -> OnlineClient { + OnlineClient { + inner: Arc::new(OnlineClientInner { + config, + backend: backend.into() + }) + } + } + + /// Pick the block height at which to operate. This references data from the + /// [`OnlineClient`] it's called on, and so cannot outlive it. + pub async fn at_block( + &self, + block_number: u32, + ) -> Result, T>, OnlineClientAtBlockError> { + let config = &self.inner.config; + let rpc_methods = &self.inner.rpc_methods; + + let block_hash = rpc_methods + .archive_v1_hash_by_height(block_number as usize) + .await + .map_err(|e| OnlineClientAtBlockError::CannotGetBlockHash { + block_number, + reason: e, + })? + .pop() + .ok_or_else(|| OnlineClientAtBlockError::BlockNotFound { block_number })? 
+ .into(); + + // Get our configuration, or fetch from the node if not available. + let spec_version = + if let Some(spec_version) = config.spec_version_for_block_number(block_number) { + spec_version + } else { + // Fetch spec version. Caching this doesn't really make sense, so either + // details are provided offline or we fetch them every time. + get_spec_version(rpc_methods, block_hash).await? + }; + let metadata = if let Some(metadata) = config.metadata_for_spec_version(spec_version) { + metadata + } else { + // Fetch and then give our config the opportunity to cache this metadata. + let metadata = get_metadata(rpc_methods, block_hash).await?; + let metadata = Arc::new(metadata); + config.set_metadata_for_spec_version(spec_version, metadata.clone()); + metadata + }; + + let mut historic_types = config.legacy_types_for_spec_version(spec_version); + // The metadata can be used to construct call and event types instead of us having to hardcode them all for every spec version: + let types_from_metadata = frame_decode::helpers::type_registry_from_metadata_any(&metadata) + .map_err( + |parse_error| OnlineClientAtBlockError::CannotInjectMetadataTypes { parse_error }, + )?; + historic_types.prepend(types_from_metadata); + + Ok(ClientAtBlock::new(OnlineClientAtBlock { + config, + historic_types, + metadata, + rpc_methods, + block_hash, + })) + } +} + +/// This represents an online client at a specific block. +#[doc(hidden)] +pub trait OnlineClientAtBlockT: OfflineClientAtBlockT +{ + /// Return the RPC methods we'll use to interact with the node. + fn backend(&self) -> &dyn Backend; + /// Return the block hash for the current block. + fn block_hash(&self) -> HashFor; +} + +// Dev note: this shouldn't need to be exposed unless there is some +// need to explicitly name the ClientAAtBlock type. Rather keep it +// private to allow changes if possible. 
+#[doc(hidden)] +pub struct OnlineClientAtBlock { + metadata: Arc, + backend: Arc>, + hasher: T::Hasher, + block_hash: HashFor, +} + +impl OnlineClientAtBlockT for OnlineClientAtBlock { + fn backend(&self) -> &dyn Backend { + &*self.backend + } + fn block_hash(&self) -> HashFor { + self.block_hash + } +} + +impl OfflineClientAtBlockT for OnlineClientAtBlock { + fn metadata(&self) -> &Metadata { + &self.metadata + } +} + +async fn get_spec_version( + rpc_methods: &ChainHeadRpcMethods>, + block_hash: HashFor, +) -> Result { + use codec::Decode; + use subxt_rpcs::methods::chain_head::ArchiveCallResult; + + // make a runtime call to get the version information. This is also a constant + // in the metadata and so we could fetch it from there to avoid the call, but it would be a + // bit more effort. + let spec_version_bytes = { + let call_res = rpc_methods + .archive_v1_call(block_hash.into(), "Core_version", &[]) + .await + .map_err(|e| OnlineClientAtBlockError::CannotGetSpecVersion { + block_hash: block_hash.to_string(), + reason: format!("Error calling Core_version: {e}"), + })?; + match call_res { + ArchiveCallResult::Success(bytes) => bytes.0, + ArchiveCallResult::Error(e) => { + return Err(OnlineClientAtBlockError::CannotGetSpecVersion { + block_hash: block_hash.to_string(), + reason: format!("Core_version returned an error: {e}"), + }); + } + } + }; + + // We only care about the spec version, so just decode enough of this version information + // to be able to pluck out what we want, and ignore the rest. + let spec_version = { + #[derive(codec::Decode)] + struct SpecVersionHeader { + _spec_name: String, + _impl_name: String, + _authoring_version: u32, + spec_version: u32, + } + SpecVersionHeader::decode(&mut &spec_version_bytes[..]) + .map_err(|e| OnlineClientAtBlockError::CannotGetSpecVersion { + block_hash: block_hash.to_string(), + reason: format!("Error decoding Core_version response: {e}"), + })? 
+ .spec_version + }; + + Ok(spec_version) +} + +async fn get_metadata( + rpc_methods: &ChainHeadRpcMethods>, + block_hash: HashFor, +) -> Result { + // First, try to use the "modern" metadata APIs to get the most recent version we can. + let version_to_get = rpc_methods + .archive_v1_call(block_hash.into(), "Metadata_metadata_versions", &[]) + .await + .ok() + .and_then(|res| res.as_success()) + .and_then(|res| >::decode(&mut &res[..]).ok()) + .and_then(|versions| { + // We want to filter out the "unstable" version, which is represented by u32::MAX. + versions.into_iter().filter(|v| *v != u32::MAX).max() + }); + + // We had success calling the above API, so we expect the "modern" metadata API to work. + if let Some(version_to_get) = version_to_get { + let version_bytes = version_to_get.encode(); + let rpc_response = rpc_methods + .archive_v1_call( + block_hash.into(), + "Metadata_metadata_at_version", + &version_bytes, + ) + .await + .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata { + block_hash: block_hash.to_string(), + reason: format!("Error calling Metadata_metadata_at_version: {e}"), + }) + .and_then(|res| match res { + ArchiveCallResult::Success(bytes) => Ok(bytes.0), + ArchiveCallResult::Error(e) => Err(OnlineClientAtBlockError::CannotGetMetadata { + block_hash: block_hash.to_string(), + reason: format!("Calling Metadata_metadata_at_version returned an error: {e}"), + }), + })?; + + // Option because we may have asked for a version that doesn't exist. Compact because we get back a Vec + // of the metadata bytes, and the Vec is preceded by it's compact encoded length. The actual bytes are then + // decoded as a `RuntimeMetadataPrefixed`, after this. + let (_, metadata) = , RuntimeMetadataPrefixed)>>::decode(&mut &rpc_response[..]) + .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata { + block_hash: block_hash.to_string(), + reason: format!("Error decoding response for Metadata_metadata_at_version: {e}"), + })? 
+ .ok_or_else(|| OnlineClientAtBlockError::CannotGetMetadata { + block_hash: block_hash.to_string(), + reason: format!("No metadata returned for the latest version from Metadata_metadata_versions ({version_to_get})"), + })?; + + return Ok(metadata.1); + } + + // We didn't get a version from Metadata_metadata_versions, so fall back to the "old" API. + let metadata_bytes = rpc_methods + .archive_v1_call(block_hash.into(), "Metadata_metadata", &[]) + .await + .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata { + block_hash: block_hash.to_string(), + reason: format!("Error calling Metadata_metadata: {e}"), + }) + .and_then(|res| match res { + ArchiveCallResult::Success(bytes) => Ok(bytes.0), + ArchiveCallResult::Error(e) => Err(OnlineClientAtBlockError::CannotGetMetadata { + block_hash: block_hash.to_string(), + reason: format!("Calling Metadata_metadata returned an error: {e}"), + }), + })?; + + let (_, metadata) = <(Compact, RuntimeMetadataPrefixed)>::decode(&mut &metadata_bytes[..]) + .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata { + block_hash: block_hash.to_string(), + reason: format!("Error decoding response for Metadata_metadata: {e}"), + })?; + + Ok(metadata.1) +} diff --git a/new/src/config.rs b/new/src/config.rs index 8b29c37f51..2c1c2a5461 100644 --- a/new/src/config.rs +++ b/new/src/config.rs @@ -21,7 +21,7 @@ use scale_decode::DecodeAsType; use scale_encode::EncodeAsType; use serde::{Serialize, de::DeserializeOwned}; use subxt_metadata::Metadata; -use std::{marker::PhantomData, sync::Arc}; +use std::{fmt::Display, marker::PhantomData, sync::Arc}; use scale_info_legacy::TypeRegistrySet; use subxt_rpcs::RpcConfig; @@ -63,7 +63,9 @@ pub trait Config: Clone + Debug + Sized + Send + Sync + 'static { /// /// The [`crate::client::OnlineClient`] will look this up on chain if it's not available here, /// but the [`crate::client::OfflineClient`] will error if this is not available for the required block number. 
- fn spec_version_for_block_number(&self, block_number: u32) -> Option; + fn spec_version_for_block_number(&self, _block_number: u32) -> Option { + None + } /// Return the metadata for a given spec version, if available. /// @@ -72,17 +74,19 @@ pub trait Config: Clone + Debug + Sized + Send + Sync + 'static { /// The [`crate::client::OfflineClient`] will error if this is not available for the required spec version. fn metadata_for_spec_version( &self, - spec_version: u32, - ) -> Option>; + _spec_version: u32, + ) -> Option> { + None + } /// Set some metadata for a given spec version. the [`crate::client::OnlineClient`] will call this if it has /// to retrieve metadata from the chain, to give this the opportunity to cache it. The configuration can /// do nothing if it prefers. fn set_metadata_for_spec_version( &self, - spec_version: u32, - metadata: Arc, - ); + _spec_version: u32, + _metadata: Arc, + ) {} /// Return legacy types (ie types to use with Runtimes that return pre-V14 metadata) for a given spec version. /// If this returns `None`, [`subxt`] will return an error if type definitions are needed to access some older @@ -92,8 +96,10 @@ pub trait Config: Clone + Debug + Sized + Send + Sync + 'static { /// into our [`Metadata`] type, which will then be used. fn legacy_types_for_spec_version<'this>( &'this self, - spec_version: u32, - ) -> Option>; + _spec_version: u32, + ) -> Option> { + None + } } /// `RpcConfigFor` can be used anywhere which requires an implementation of [`subxt_rpcs::RpcConfig`]. @@ -117,6 +123,7 @@ pub type ParamsFor = <::ExtrinsicParams as ExtrinsicParams>:: /// Block hashes must conform to a bunch of things to be used in Subxt. 
pub trait Hash: Debug + + Display + Copy + Send + Sync @@ -132,6 +139,7 @@ pub trait Hash: } impl Hash for T where T: Debug + + Display + Copy + Send + Sync diff --git a/new/src/config/substrate.rs b/new/src/config/substrate.rs index b383987fcb..96990f517b 100644 --- a/new/src/config/substrate.rs +++ b/new/src/config/substrate.rs @@ -22,6 +22,7 @@ pub struct SubstrateConfigBuilder { legacy_types: Option, spec_version_for_block_number: RangeMap, metadata_for_spec_version: Mutex>>, + use_old_v9_hashers_before_spec_version: u32, } impl Default for SubstrateConfigBuilder { @@ -37,6 +38,7 @@ impl SubstrateConfigBuilder { legacy_types: None, spec_version_for_block_number: RangeMap::empty(), metadata_for_spec_version: Mutex::new(HashMap::new()), + use_old_v9_hashers_before_spec_version: 0, } } @@ -77,6 +79,16 @@ impl SubstrateConfigBuilder { self } + /// The storage hasher encoding/decoding changed during V9 metadata. By default we support the "new" version + /// of things. We can use this option to support the old version of things prior to a given spec version. + pub fn use_old_v9_hashers_before_spec_version( + mut self, + spec_version: u32 + ) -> Self { + self.use_old_v9_hashers_before_spec_version = spec_version; + self + } + /// Construct the [`SubstrateConfig`] from this builder. 
pub fn build(self) -> SubstrateConfig { SubstrateConfig { diff --git a/new/src/error.rs b/new/src/error.rs index eec82db3f9..07b385fdef 100644 --- a/new/src/error.rs +++ b/new/src/error.rs @@ -30,6 +30,12 @@ pub use subxt_metadata::TryFromError as MetadataTryFromError; #[non_exhaustive] #[allow(missing_docs)] pub enum Error { + #[error(transparent)] + OnlineClientError(#[from] OnlineClientError), + #[error(transparent)] + OfflineClientAtBlockError(#[from] OfflineClientAtBlockError), + #[error(transparent)] + OnlineClientAtBlockError(#[from] OnlineClientAtBlockError), #[error(transparent)] ExtrinsicDecodeErrorAt(#[from] ExtrinsicDecodeErrorAt), #[error(transparent)] @@ -47,8 +53,6 @@ pub enum Error { #[error(transparent)] AccountNonceError(#[from] AccountNonceError), #[error(transparent)] - OnlineClientError(#[from] OnlineClientError), - #[error(transparent)] RuntimeUpdaterError(#[from] RuntimeUpdaterError), #[error(transparent)] RuntimeUpdateeApplyError(#[from] RuntimeUpdateeApplyError), @@ -156,6 +160,108 @@ impl Error { } } +/// Errors constructing an offline client at a specific block number. +#[allow(missing_docs)] +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +pub enum OfflineClientAtBlockError { + #[error( + "Cannot construct OfflineClientAtBlock: spec version not found for block number {block_number}" + )] + SpecVersionNotFound { + /// The block number for which the spec version was not found. + block_number: u32, + }, + #[error( + "Cannot construct OfflineClientAtBlock: metadata not found for spec version {spec_version}" + )] + MetadataNotFound { + /// The spec version for which the metadata was not found. + spec_version: u32, + }, +} + +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum OnlineClientError { + #[error("Cannot construct OnlineClient: The URL provided is invalid: {url}")] + InvalidUrl { + /// The URL that was invalid. 
+ url: String, + }, + #[error("Cannot construct OnlineClient: {0}")] + RpcError(#[from] subxt_rpcs::Error), + #[error( + "Cannot construct OnlineClient: Cannot fetch latest finalized block to obtain init details from: {0}" + )] + CannotGetLatestFinalizedBlock(BackendError), + #[error("Cannot construct OnlineClient: Cannot fetch genesis hash: {0}")] + CannotGetGenesisHash(BackendError), + #[error("Cannot construct OnlineClient: Cannot fetch current runtime version: {0}")] + CannotGetCurrentRuntimeVersion(BackendError), + #[error("Cannot construct OnlineClient: Cannot fetch metadata: {0}")] + CannotFetchMetadata(BackendError), +} + +impl OnlineClientError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + OnlineClientError::CannotGetLatestFinalizedBlock(e) + | OnlineClientError::CannotGetGenesisHash(e) + | OnlineClientError::CannotGetCurrentRuntimeVersion(e) + | OnlineClientError::CannotFetchMetadata(e) => Some(e), + _ => None, + } + } +} + +/// Errors constructing an online client at a specific block number. +#[allow(missing_docs)] +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +pub enum OnlineClientAtBlockError { + #[error( + "Cannot construct OnlineClientAtBlock: failed to get block hash from node for block {block_number}: {reason}" + )] + CannotGetBlockHash { + /// Block number we failed to get the hash for. + block_number: u64, + /// The error we encountered. + reason: subxt_rpcs::Error, + }, + #[error("Cannot construct OnlineClientAtBlock: block number {block_number} not found")] + BlockNotFound { + /// The block number for which a block was not found. + block_number: u64, + }, + #[error( + "Cannot construct OnlineClientAtBlock: failed to get spec version for block hash {block_hash}: {reason}" + )] + CannotGetSpecVersion { + /// The block hash for which we failed to get the spec version. + block_hash: String, + /// The error we encountered. 
+ reason: String, + }, + #[error( + "Cannot construct OnlineClientAtBlock: failed to get metadata for block hash {block_hash}: {reason}" + )] + CannotGetMetadata { + /// The block hash for which we failed to get the metadata. + block_hash: String, + /// The error we encountered. + reason: String, + }, + #[error( + "Cannot inject types from metadata: failure to parse a type found in the metadata: {parse_error}" + )] + CannotInjectMetadataTypes { + /// Error parsing a type found in the metadata. + parse_error: scale_info_legacy::lookup_name::ParseError, + }, +} + #[derive(Debug, thiserror::Error)] #[non_exhaustive] #[allow(missing_docs)] @@ -277,36 +383,6 @@ impl AccountNonceError { } } -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -#[allow(missing_docs)] -pub enum OnlineClientError { - #[error("Cannot construct OnlineClient: {0}")] - RpcError(#[from] subxt_rpcs::Error), - #[error( - "Cannot construct OnlineClient: Cannot fetch latest finalized block to obtain init details from: {0}" - )] - CannotGetLatestFinalizedBlock(BackendError), - #[error("Cannot construct OnlineClient: Cannot fetch genesis hash: {0}")] - CannotGetGenesisHash(BackendError), - #[error("Cannot construct OnlineClient: Cannot fetch current runtime version: {0}")] - CannotGetCurrentRuntimeVersion(BackendError), - #[error("Cannot construct OnlineClient: Cannot fetch metadata: {0}")] - CannotFetchMetadata(BackendError), -} - -impl OnlineClientError { - fn backend_error(&self) -> Option<&BackendError> { - match self { - OnlineClientError::CannotGetLatestFinalizedBlock(e) - | OnlineClientError::CannotGetGenesisHash(e) - | OnlineClientError::CannotGetCurrentRuntimeVersion(e) - | OnlineClientError::CannotFetchMetadata(e) => Some(e), - _ => None, - } - } -} - #[derive(Debug, thiserror::Error)] #[non_exhaustive] #[allow(missing_docs)] diff --git a/new/src/lib.rs b/new/src/lib.rs index 96377e526b..b648a7a9db 100644 --- a/new/src/lib.rs +++ b/new/src/lib.rs @@ -36,8 +36,8 @@ pub mod config; pub mod 
client; pub mod error; pub mod utils; +pub mod backend; // pub mod book; -// pub mod backend; // pub mod blocks; // pub mod constants; // pub mod custom_values; diff --git a/rpcs/src/methods/chain_head.rs b/rpcs/src/methods/chain_head.rs index 4c96ad8bc9..69863cd3d5 100644 --- a/rpcs/src/methods/chain_head.rs +++ b/rpcs/src/methods/chain_head.rs @@ -15,9 +15,9 @@ use serde::{Deserialize, Deserializer, Serialize}; use std::collections::{HashMap, VecDeque}; use std::task::Poll; -/// An interface to call the unstable RPC methods. This interface is instantiated with -/// some `T: Config` trait which determines some of the types that the RPC methods will -/// take or hand back. +/// An interface to call the new ["chainHead" RPC methods](https://paritytech.github.io/json-rpc-interface-spec/). +/// This interface is instantiated with some `T: RpcConfig` trait which determines some of the types that +/// the RPC methods will take or hand back. #[derive_where(Clone, Debug)] pub struct ChainHeadRpcMethods { client: RpcClient, @@ -386,14 +386,15 @@ impl ChainHeadRpcMethods { pub async fn archive_v1_storage( &self, block_hash: T::Hash, - items: impl IntoIterator>, + items: impl IntoIterator>, child_key: Option<&[u8]>, ) -> Result, Error> { - let items: Vec> = items + let items: Vec> = items .into_iter() - .map(|item| StorageQuery { + .map(|item| ArchiveStorageQuery { key: to_hex(item.key), query_type: item.query_type, + pagination_start_key: item.pagination_start_key.map(|k| to_hex(k)), }) .collect(); @@ -408,137 +409,6 @@ impl ChainHeadRpcMethods { Ok(ArchiveStorageSubscription { sub, done: false }) } - - // Dev note: we continue to support the latest "unstable" archive methods because - // they will be around for a while before the stable ones make it into a release. - // The below are just a copy-paste of the v1 methods, above, but calling the - // "unstable" RPCs instead. Eventually we'll remove them. 
- - /// Fetch the block body (ie the extrinsics in the block) given its hash. - /// - /// Returns an array of the hexadecimal-encoded scale-encoded extrinsics found in the block, - /// or `None` if the block wasn't found. - pub async fn archive_unstable_body( - &self, - block_hash: T::Hash, - ) -> Result>, Error> { - self.client - .request("archive_unstable_body", rpc_params![block_hash]) - .await - } - - /// Call the `archive_unstable_call` method and return the response. - pub async fn archive_unstable_call( - &self, - block_hash: T::Hash, - function: &str, - call_parameters: &[u8], - ) -> Result { - use serde::de::Error as _; - - // We deserialize to this intermediate shape, since - // we can't have a boolean tag to denote variants. - #[derive(Deserialize)] - struct Response { - success: bool, - value: Option, - error: Option, - // This was accidentally used instead of value in Substrate, - // so to support those impls we try it here if needed: - result: Option, - } - - let res: Response = self - .client - .request( - "archive_unstable_call", - rpc_params![block_hash, function, to_hex(call_parameters)], - ) - .await?; - - let value = res.value.or(res.result); - match (res.success, value, res.error) { - (true, Some(value), _) => Ok(ArchiveCallResult::Success(value)), - (false, _, err) => Ok(ArchiveCallResult::Error(err.unwrap_or(String::new()))), - (true, None, _) => { - let m = "archive_unstable_call: 'success: true' response should have `value: 0x1234` alongside it"; - Err(Error::Deserialization(serde_json::Error::custom(m))) - } - } - } - - /// Return the finalized block height of the chain. - pub async fn archive_unstable_finalized_height(&self) -> Result { - self.client - .request("archive_unstable_finalizedHeight", rpc_params![]) - .await - } - - /// Return the genesis hash. 
- pub async fn archive_unstable_genesis_hash(&self) -> Result { - self.client - .request("archive_unstable_genesisHash", rpc_params![]) - .await - } - - /// Given a block height, return the hashes of the zero or more blocks at that height. - /// For blocks older than the latest finalized block, only one entry will be returned. For blocks - /// newer than the latest finalized block, it's possible to have 0, 1 or multiple blocks at - /// that height given that forks could occur. - pub async fn archive_unstable_hash_by_height( - &self, - height: usize, - ) -> Result, Error> { - self.client - .request("archive_unstable_hashByHeight", rpc_params![height]) - .await - } - - /// Fetch the header for a block with the given hash, or `None` if no block with that hash exists. - pub async fn archive_unstable_header( - &self, - block_hash: T::Hash, - ) -> Result, Error> { - let maybe_encoded_header: Option = self - .client - .request("archive_unstable_header", rpc_params![block_hash]) - .await?; - - let Some(encoded_header) = maybe_encoded_header else { - return Ok(None); - }; - - let header = - ::decode(&mut &*encoded_header.0).map_err(Error::Decode)?; - Ok(Some(header)) - } - - /// Query the node storage and return a subscription which streams corresponding storage events back. - pub async fn archive_unstable_storage( - &self, - block_hash: T::Hash, - items: impl IntoIterator>, - child_key: Option<&[u8]>, - ) -> Result, Error> { - let items: Vec> = items - .into_iter() - .map(|item| StorageQuery { - key: to_hex(item.key), - query_type: item.query_type, - }) - .collect(); - - let sub = self - .client - .subscribe( - "archive_unstable_storage", - rpc_params![block_hash, items, child_key.map(to_hex)], - "archive_unstable_stopStorage", - ) - .await?; - - Ok(ArchiveStorageSubscription { sub, done: false }) - } } /// This represents events generated by the `follow` method. 
@@ -849,6 +719,24 @@ pub struct StorageQuery { pub query_type: StorageQueryType, } +/// The storage item received as parameter. This is used for archive storage queries, and +/// unlike [`StorageQuery`] also contains `paginationStartKey` to define where iteration +/// should begin. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ArchiveStorageQuery { +    /// The provided key. +    pub key: Key, +    /// The type of the storage query. +    #[serde(rename = "type")] +    pub query_type: StorageQueryType, +    /// This parameter is optional and should be a string containing the hexadecimal-encoded key +    /// from which the storage iteration should resume. This parameter is only valid in the context +    /// of `descendantsValues` and `descendantsHashes`. +    #[serde(skip_serializing_if = "Option::is_none")] +    pub pagination_start_key: Option, +} + /// The type of the storage query. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -1104,7 +992,7 @@ impl ArchiveStorageEvent { } } -/// Something went wrong during the [`ChainHeadRpcMethods::archive_unstable_storage()`] subscription. +/// Something went wrong during the [`ChainHeadRpcMethods::archive_v1_storage()`] subscription. #[derive(Debug, Clone, PartialEq, Eq, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ArchiveStorageEventError { @@ -1112,7 +1000,7 @@ pub struct ArchiveStorageEventError { pub error: String, } -/// A storage item returned from the [`ChainHeadRpcMethods::archive_unstable_storage()`] subscription. +/// A storage item returned from the [`ChainHeadRpcMethods::archive_v1_storage()`] subscription.
#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ArchiveStorageEventItem { diff --git a/rpcs/src/methods/legacy.rs b/rpcs/src/methods/legacy.rs index b7844f3ff7..a672fa5f81 100644 --- a/rpcs/src/methods/legacy.rs +++ b/rpcs/src/methods/legacy.rs @@ -13,7 +13,7 @@ use primitive_types::U256; use serde::{Deserialize, Serialize}; /// An interface to call the legacy RPC methods. This interface is instantiated with -/// some `T: Config` trait which determines some of the types that the RPC methods will +/// some `T: RpcConfig` trait which determines some of the types that the RPC methods will /// take or hand back. #[derive_where(Clone, Debug)] pub struct LegacyRpcMethods {