WIP: Backends added, Archive backend created

This commit is contained in:
James Wilson
2025-11-28 12:35:33 +00:00
parent 4c27bd8062
commit fbde20cb0e
24 changed files with 5155 additions and 186 deletions
Generated
+2 -2
View File
@@ -1953,9 +1953,9 @@ dependencies = [
[[package]]
name = "frame-decode"
version = "0.15.0"
version = "0.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fb3bfa2988ef40247e0e0eecfb171a01ad6f4e399485503aad4c413a5f236f3"
checksum = "9cb2b069fdf47c62526c6c7a64c5edba9c3c41b4bb11dac8e4fbf8e2857859a0"
dependencies = [
"frame-metadata 23.0.0",
"parity-scale-codec",
+1 -1
View File
@@ -82,7 +82,7 @@ darling = "0.20.10"
derive-where = "1.2.7"
either = { version = "1.13.0", default-features = false }
finito = { version = "0.1.0", default-features = false }
frame-decode = { version = "0.15.0", default-features = false }
frame-decode = { version = "0.16.0", default-features = false }
frame-metadata = { version = "23.0.0", default-features = false }
futures = { version = "0.3.31", default-features = false, features = ["std"] }
getrandom = { version = "0.2", default-features = false }
+1
View File
@@ -192,6 +192,7 @@ impl frame_decode::storage::StorageTypeInfo for Metadata {
.default_value
.as_ref()
.map(|def| Cow::Borrowed(&**def)),
use_old_v9_storage_hashers: false,
};
Ok(info)
+353
View File
@@ -0,0 +1,353 @@
// Copyright 2019-2025 Parity Technologies (UK) Ltd.
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
// see LICENSE for license details.
//! This module exposes a backend trait for Subxt which allows us to get and set
//! the necessary information (probably from a JSON-RPC API, but that's up to the
//! implementation).
mod chain_head;
mod archive;
mod legacy;
mod combined;
mod utils;
use crate::config::{Config, HashFor};
use crate::error::BackendError;
use async_trait::async_trait;
use codec::{Decode, Encode};
use futures::{Stream, StreamExt};
use std::pin::Pin;
use std::sync::Arc;
use subxt_metadata::Metadata;
/// Prevent the backend trait being implemented externally.
#[doc(hidden)]
pub(crate) mod sealed {
    /// Only backends defined in this crate implement this; the supertrait
    /// bound on [`super::Backend`] keeps the trait closed to downstream impls.
    pub trait Sealed {}
}
/// This trait exposes the interface that Subxt will use to communicate with
/// a backend. Its goal is to be as minimal as possible.
#[async_trait]
pub trait Backend<T: Config>: sealed::Sealed + Send + Sync + 'static {
    /// Fetch values from storage.
    async fn storage_fetch_values(
        &self,
        keys: Vec<Vec<u8>>,
        at: HashFor<T>,
    ) -> Result<StreamOfResults<StorageResponse>, BackendError>;
    /// Fetch keys underneath the given key from storage.
    async fn storage_fetch_descendant_keys(
        &self,
        key: Vec<u8>,
        at: HashFor<T>,
    ) -> Result<StreamOfResults<Vec<u8>>, BackendError>;
    /// Fetch values underneath the given key from storage.
    async fn storage_fetch_descendant_values(
        &self,
        key: Vec<u8>,
        at: HashFor<T>,
    ) -> Result<StreamOfResults<StorageResponse>, BackendError>;
    /// Fetch the genesis hash.
    async fn genesis_hash(&self) -> Result<HashFor<T>, BackendError>;
    /// Get a block header.
    async fn block_header(&self, at: HashFor<T>) -> Result<Option<T::Header>, BackendError>;
    /// Return the extrinsics found in the block. Each extrinsic is represented
    /// by a vector of bytes which has _not_ been SCALE decoded (in other words, the
    /// first bytes in the vector will decode to the compact encoded length of the extrinsic)
    async fn block_body(&self, at: HashFor<T>) -> Result<Option<Vec<Vec<u8>>>, BackendError>;
    /// Get the most recent finalized block hash.
    ///
    /// NOTE(review): currently only needed by the blocks client for the
    /// finalized block stream; it can probably be removed if that changes.
    async fn latest_finalized_block_ref(&self) -> Result<BlockRef<HashFor<T>>, BackendError>;
    /// A stream of all new block headers as they arrive.
    async fn stream_all_block_headers(
        &self,
        hasher: T::Hasher,
    ) -> Result<StreamOfResults<(T::Header, BlockRef<HashFor<T>>)>, BackendError>;
    /// A stream of best block headers.
    async fn stream_best_block_headers(
        &self,
        hasher: T::Hasher,
    ) -> Result<StreamOfResults<(T::Header, BlockRef<HashFor<T>>)>, BackendError>;
    /// A stream of finalized block headers.
    async fn stream_finalized_block_headers(
        &self,
        hasher: T::Hasher,
    ) -> Result<StreamOfResults<(T::Header, BlockRef<HashFor<T>>)>, BackendError>;
    /// Submit a transaction. This will return a stream of events about it.
    async fn submit_transaction(
        &self,
        bytes: &[u8],
    ) -> Result<StreamOfResults<TransactionStatus<HashFor<T>>>, BackendError>;
    /// Make a call to some runtime API.
    async fn call(
        &self,
        method: &str,
        call_parameters: Option<&[u8]>,
        at: HashFor<T>,
    ) -> Result<Vec<u8>, BackendError>;
}
/// helpful utility methods derived from those provided on [`Backend`]
#[async_trait]
pub trait BackendExt<T: Config>: Backend<T> {
/// Fetch a single value from storage.
async fn storage_fetch_value(
&self,
key: Vec<u8>,
at: HashFor<T>,
) -> Result<Option<Vec<u8>>, BackendError> {
self.storage_fetch_values(vec![key], at)
.await?
.next()
.await
.transpose()
.map(|o| o.map(|s| s.value))
}
/// The same as a [`Backend::call()`], but it will also attempt to decode the
/// result into the given type, which is a fairly common operation.
async fn call_decoding<D: codec::Decode>(
&self,
method: &str,
call_parameters: Option<&[u8]>,
at: HashFor<T>,
) -> Result<D, BackendError> {
let bytes = self.call(method, call_parameters, at).await?;
let res =
D::decode(&mut &*bytes).map_err(BackendError::CouldNotScaleDecodeRuntimeResponse)?;
Ok(res)
}
/// Return the metadata at some version.
async fn metadata_at_version(
&self,
version: u32,
at: HashFor<T>,
) -> Result<Metadata, BackendError> {
let param = version.encode();
let opaque: Option<frame_metadata::OpaqueMetadata> = self
.call_decoding("Metadata_metadata_at_version", Some(&param), at)
.await?;
let Some(opaque) = opaque else {
return Err(BackendError::MetadataVersionNotFound(version));
};
let metadata: Metadata =
Decode::decode(&mut &opaque.0[..]).map_err(BackendError::CouldNotDecodeMetadata)?;
Ok(metadata)
}
/// Return V14 metadata from the legacy `Metadata_metadata` call.
async fn legacy_metadata(&self, at: HashFor<T>) -> Result<Metadata, BackendError> {
let opaque: frame_metadata::OpaqueMetadata =
self.call_decoding("Metadata_metadata", None, at).await?;
let metadata: Metadata =
Decode::decode(&mut &opaque.0[..]).map_err(BackendError::CouldNotDecodeMetadata)?;
Ok(metadata)
}
}
#[async_trait]
impl<B: Backend<T> + ?Sized, T: Config> BackendExt<T> for B {}
/// An opaque struct which, while alive, indicates that some references to a block
/// still exist. This gives the backend the opportunity to keep the corresponding block
/// details around for a while if it likes and is able to. No guarantees can be made about
/// how long the corresponding details might be available for, but if no references to a block
/// exist, then the backend is free to discard any details for it.
#[derive(Clone)]
pub struct BlockRef<H> {
    // The hash of the referenced block.
    hash: H,
    // We keep this around so that when it is dropped, it has the
    // opportunity to tell the backend.
    _pointer: Option<Arc<dyn BlockRefT>>,
}
impl<H> From<H> for BlockRef<H> {
    fn from(value: H) -> Self {
        BlockRef::from_hash(value)
    }
}
// All of the comparison/hash impls below deliberately look only at the block
// hash; `_pointer` is bookkeeping and never affects equality, ordering or
// hashing.
impl<H: PartialEq> PartialEq for BlockRef<H> {
    fn eq(&self, other: &Self) -> bool {
        self.hash == other.hash
    }
}
impl<H: Eq> Eq for BlockRef<H> {}
// Manual implementation to work around https://github.com/mcarton/rust-derivative/issues/115.
// (Kept separate from `Ord` below because the bounds differ: `H: PartialOrd`
// here vs `H: Ord` there, so the canonical `Some(self.cmp(other))` form does
// not apply.)
impl<H: PartialOrd> PartialOrd for BlockRef<H> {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        self.hash.partial_cmp(&other.hash)
    }
}
impl<H: Ord> Ord for BlockRef<H> {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.hash.cmp(&other.hash)
    }
}
impl<H: std::fmt::Debug> std::fmt::Debug for BlockRef<H> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_tuple("BlockRef").field(&self.hash).finish()
    }
}
impl<H: std::hash::Hash> std::hash::Hash for BlockRef<H> {
    fn hash<Hasher: std::hash::Hasher>(&self, state: &mut Hasher) {
        self.hash.hash(state);
    }
}
impl<H> BlockRef<H> {
    /// A [`BlockRef`] that doesn't reference a given block, but does have an associated hash.
    /// This is used in the legacy backend, which has no notion of pinning blocks.
    pub fn from_hash(hash: H) -> Self {
        Self {
            hash,
            _pointer: None,
        }
    }
    /// Construct a [`BlockRef`] from an instance of the underlying trait. It's expected
    /// that the [`Backend`] implementation will call this if it wants to track which blocks
    /// are potentially in use.
    pub fn new<P: BlockRefT>(hash: H, inner: P) -> Self {
        Self {
            hash,
            _pointer: Some(Arc::new(inner)),
        }
    }
    /// Return the hash of the referenced block.
    pub fn hash(&self) -> H
    where
        H: Copy,
    {
        self.hash
    }
}
/// A trait that a [`Backend`] can implement to know when some block
/// can be unpinned: when this is dropped, there are no remaining references
/// to the block that it's associated with.
pub trait BlockRefT: Send + Sync + 'static {}
/// Runtime version information needed to submit transactions.
///
/// NOTE(review): the field docs below appear to mirror Substrate's
/// `RuntimeVersion` type (they mention `spec_name`/`authoring_version`, which
/// are not fields of this struct); only the two fields needed to submit
/// transactions are kept here.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct RuntimeVersion {
    /// Version of the runtime specification. A full-node will not attempt to use its native
    /// runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`,
    /// `spec_version` and `authoring_version` are the same between Wasm and native.
    pub spec_version: u32,
    /// All existing dispatches are fully compatible when this number doesn't change. If this
    /// number changes, then `spec_version` must change, also.
    ///
    /// This number must change when an existing dispatchable (module ID, dispatch ID) is changed,
    /// either through an alteration in its user-level semantics, a parameter
    /// added/removed/changed, a dispatchable being removed, a module being removed, or a
    /// dispatchable/module changing its index.
    ///
    /// It need *not* change when a new module is added or when a dispatchable is added.
    pub transaction_version: u32,
}
/// A stream of some item.
///
/// This is a thin wrapper around a boxed, pinned [`Stream`], existing mainly
/// so that backends can name the stream types they hand back.
pub struct StreamOf<T>(Pin<Box<dyn Stream<Item = T> + Send + 'static>>);
impl<T> Stream for StreamOf<T> {
    type Item = T;
    fn poll_next(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        // `StreamOf` only contains a `Pin<Box<..>>`, which is `Unpin`, so we
        // can safely get at the inner stream and delegate to it.
        self.get_mut().0.as_mut().poll_next(cx)
    }
}
impl<T> std::fmt::Debug for StreamOf<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // The inner stream is opaque, so print a placeholder for it.
        f.debug_tuple("StreamOf").field(&"<stream>").finish()
    }
}
impl<T> StreamOf<T> {
    /// Construct a new stream.
    pub fn new(inner: Pin<Box<dyn Stream<Item = T> + Send + 'static>>) -> Self {
        Self(inner)
    }
    /// Returns the next item in the stream. This is just a wrapper around
    /// [`StreamExt::next()`] so that you can avoid the extra import.
    pub async fn next(&mut self) -> Option<T> {
        StreamExt::next(self).await
    }
}
/// A stream of [`Result<Item, BackendError>`].
pub type StreamOfResults<T> = StreamOf<Result<T, BackendError>>;
/// The status of the transaction.
///
/// If the status is [`TransactionStatus::InFinalizedBlock`], [`TransactionStatus::Error`],
/// [`TransactionStatus::Invalid`] or [`TransactionStatus::Dropped`], then no future
/// events will be emitted.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum TransactionStatus<Hash> {
    /// Transaction is part of the future queue.
    Validated,
    /// The transaction has been broadcast to other nodes.
    Broadcasted,
    /// Transaction is no longer in a best block.
    NoLongerInBestBlock,
    /// Transaction has been included in block with given hash.
    InBestBlock {
        /// Block hash the transaction is in.
        hash: BlockRef<Hash>,
    },
    /// Transaction has been finalized by a finality-gadget, e.g. GRANDPA.
    InFinalizedBlock {
        /// Block hash the transaction is in.
        hash: BlockRef<Hash>,
    },
    /// Something went wrong in the node.
    Error {
        /// Human readable message; what went wrong.
        message: String,
    },
    /// Transaction is invalid (bad nonce, signature etc).
    Invalid {
        /// Human readable message; why was it invalid.
        message: String,
    },
    /// The transaction was dropped.
    Dropped {
        /// Human readable message; why was it dropped.
        message: String,
    },
}
/// A response from calls like [`Backend::storage_fetch_values`] or
/// [`Backend::storage_fetch_descendant_values`].
#[derive(serde::Serialize, serde::Deserialize, Clone, PartialEq, Debug)]
pub struct StorageResponse {
    /// The storage key, as raw bytes.
    pub key: Vec<u8>,
    /// The value found at that key, as raw bytes.
    pub value: Vec<u8>,
}
+212
View File
@@ -0,0 +1,212 @@
// Copyright 2019-2025 Parity Technologies (UK) Ltd.
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
// see LICENSE for license details.
//! This module exposes a backend implementation based on the new APIs
//! described at <https://github.com/paritytech/json-rpc-interface-spec/>. See
//! [`rpc_methods`] for the raw API calls.
//!
//! Specifically, the focus here is on the `archive` methods. These can only be used
//! to interact with archive nodes, but are less restrictive than the `chainHead` methods
//! in terms of the allowed operations.
//!
//! # Warning
//!
//! Everything in this module is **unstable**, meaning that it could change without
//! warning at any time.
mod storage_stream;
use crate::backend::{
Backend, BlockRef, StorageResponse, StreamOf, StreamOfResults,
TransactionStatus, utils::retry,
};
use crate::config::{Config, HashFor, RpcConfigFor};
use crate::error::BackendError;
use async_trait::async_trait;
use futures::StreamExt;
use subxt_rpcs::RpcClient;
use subxt_rpcs::methods::chain_head::{
ArchiveStorageQuery, ArchiveCallResult, StorageQueryType,
};
use storage_stream::ArchiveStorageStream;
/// Re-export RPC types and methods from [`subxt_rpcs::methods::chain_head`].
pub mod rpc_methods {
pub use subxt_rpcs::methods::chain_head::*;
}
// Expose the RPC methods.
pub use subxt_rpcs::methods::chain_head::ChainHeadRpcMethods as ArchiveRpcMethods;
/// The archive backend.
///
/// This implements [`Backend`] in terms of the `archive` set of RPC methods.
/// It can query any block an archive node retains, but it cannot stream new
/// block headers (the `stream_*` methods return an error).
#[derive(Debug, Clone)]
pub struct ArchiveBackend<T: Config> {
    // RPC methods we'll want to call:
    methods: ArchiveRpcMethods<RpcConfigFor<T>>,
}
impl<T: Config> ArchiveBackend<T> {
    /// Construct an [`ArchiveBackend`] from an [`RpcClient`] (or anything that
    /// converts into one). Unlike the `chainHead` backend, there is no
    /// associated driver to run: the archive methods are plain calls.
    pub fn new(client: impl Into<RpcClient>) -> ArchiveBackend<T> {
        let methods = ArchiveRpcMethods::new(client.into());
        ArchiveBackend { methods }
    }
}
#[async_trait]
impl<T: Config> Backend<T> for ArchiveBackend<T> {
    async fn storage_fetch_values(
        &self,
        keys: Vec<Vec<u8>>,
        at: HashFor<T>,
    ) -> Result<StreamOfResults<StorageResponse>, BackendError> {
        // One `Value` query per key; the stream runs them in order.
        let queries = keys
            .into_iter()
            .map(|key| ArchiveStorageQuery {
                key,
                query_type: StorageQueryType::Value,
                pagination_start_key: None,
            })
            .collect();
        // Keys with no associated value are filtered out of the results.
        let stream = ArchiveStorageStream::new(at, self.methods.clone(), queries)
            .map(|item| match item {
                Err(e) => Some(Err(e)),
                Ok(item) => item.value.map(|val| {
                    Ok(StorageResponse {
                        key: item.key.0,
                        value: val.0,
                    })
                }),
            })
            .filter_map(async |item| item);
        Ok(StreamOf::new(Box::pin(stream)))
    }
    async fn storage_fetch_descendant_keys(
        &self,
        key: Vec<u8>,
        at: HashFor<T>,
    ) -> Result<StreamOfResults<Vec<u8>>, BackendError> {
        // Ask for descendant hashes, then ignore the hashes themselves and
        // hand back only the keys that come with them.
        let queries = std::iter::once(ArchiveStorageQuery {
            key,
            query_type: StorageQueryType::DescendantsHashes,
            pagination_start_key: None,
        })
        .collect();
        let stream = ArchiveStorageStream::new(at, self.methods.clone(), queries)
            .map(|item| item.map(|item| item.key.0));
        Ok(StreamOf::new(Box::pin(stream)))
    }
    async fn storage_fetch_descendant_values(
        &self,
        key: Vec<u8>,
        at: HashFor<T>,
    ) -> Result<StreamOfResults<StorageResponse>, BackendError> {
        let queries = std::iter::once(ArchiveStorageQuery {
            key,
            query_type: StorageQueryType::DescendantsValues,
            pagination_start_key: None,
        })
        .collect();
        // Entries with no associated value are filtered out of the results.
        let stream = ArchiveStorageStream::new(at, self.methods.clone(), queries)
            .map(|item| match item {
                Err(e) => Some(Err(e)),
                Ok(item) => item.value.map(|val| {
                    Ok(StorageResponse {
                        key: item.key.0,
                        value: val.0,
                    })
                }),
            })
            .filter_map(async |item| item);
        Ok(StreamOf::new(Box::pin(stream)))
    }
    async fn genesis_hash(&self) -> Result<HashFor<T>, BackendError> {
        retry(|| async {
            let hash = self.methods.archive_v1_genesis_hash().await?;
            Ok(hash)
        })
        .await
    }
    async fn block_header(&self, at: HashFor<T>) -> Result<Option<T::Header>, BackendError> {
        retry(|| async {
            let header = self.methods.archive_v1_header(at).await?;
            Ok(header)
        })
        .await
    }
    async fn block_body(&self, at: HashFor<T>) -> Result<Option<Vec<Vec<u8>>>, BackendError> {
        retry(|| async {
            let Some(exts) = self.methods.archive_v1_body(at).await? else {
                return Ok(None);
            };
            Ok(Some(exts.into_iter().map(|ext| ext.0).collect()))
        })
        .await
    }
    async fn latest_finalized_block_ref(&self) -> Result<BlockRef<HashFor<T>>, BackendError> {
        retry(|| async {
            let height = self.methods.archive_v1_finalized_height().await?;
            let mut hashes = self.methods.archive_v1_hash_by_height(height).await?;
            // We expect a hash to exist at the finalized height; this branch
            // fires when the node reports none at all.
            let Some(hash) = hashes.pop() else {
                return Err(BackendError::Other(
                    "No block hash found at the latest finalized height".into(),
                ));
            };
            Ok(BlockRef::from_hash(hash))
        })
        .await
    }
    async fn stream_all_block_headers(
        &self,
        _hasher: T::Hasher,
    ) -> Result<StreamOfResults<(T::Header, BlockRef<HashFor<T>>)>, BackendError> {
        // Archive methods have no subscription equivalent to chainHead_follow,
        // so header streaming isn't possible on this backend.
        Err(BackendError::Other("The archive backend cannot stream block headers".into()))
    }
    async fn stream_best_block_headers(
        &self,
        _hasher: T::Hasher,
    ) -> Result<StreamOfResults<(T::Header, BlockRef<HashFor<T>>)>, BackendError> {
        Err(BackendError::Other("The archive backend cannot stream block headers".into()))
    }
    async fn stream_finalized_block_headers(
        &self,
        _hasher: T::Hasher,
    ) -> Result<StreamOfResults<(T::Header, BlockRef<HashFor<T>>)>, BackendError> {
        Err(BackendError::Other("The archive backend cannot stream block headers".into()))
    }
    async fn submit_transaction(
        &self,
        extrinsic: &[u8],
    ) -> Result<StreamOfResults<TransactionStatus<HashFor<T>>>, BackendError> {
        // This chainHead impl does not use chainHead_follow and so is suitable here too.
        super::chain_head::submit_transaction_ignoring_follow_events(extrinsic, &self.methods).await
    }
    async fn call(
        &self,
        method: &str,
        call_parameters: Option<&[u8]>,
        at: HashFor<T>,
    ) -> Result<Vec<u8>, BackendError> {
        let res = self
            .methods
            .archive_v1_call(at, method, call_parameters.unwrap_or(&[]))
            .await?;
        match res {
            ArchiveCallResult::Success(bytes) => Ok(bytes.0),
            ArchiveCallResult::Error(e) => Err(BackendError::Other(e)),
        }
    }
}
impl<T: Config> crate::backend::sealed::Sealed for ArchiveBackend<T> {}
+178
View File
@@ -0,0 +1,178 @@
use std::collections::VecDeque;
use subxt_rpcs::Error as RpcError;
use subxt_rpcs::methods::chain_head::{ArchiveStorageQuery, ArchiveStorageSubscription, ArchiveStorageEvent, ArchiveStorageEventItem};
use std::pin::Pin;
use std::future::Future;
use futures::{FutureExt, Stream, StreamExt};
use std::task::{Context, Poll};
use crate::error::BackendError;
use crate::config::{Config, HashFor, RpcConfigFor};
use super::ArchiveRpcMethods;
/// A stream of storage items which runs one `archive_v1_storage` subscription
/// per queued query, one query at a time, all against the same block.
pub struct ArchiveStorageStream<T: Config> {
    // The block hash that every query is executed at.
    at: HashFor<T>,
    // RPC methods used to open each storage subscription.
    methods: ArchiveRpcMethods<RpcConfigFor<T>>,
    // Queries still to be run. The active query is pushed back to the front
    // of this queue if the connection drops, so it can be retried.
    query_queue: VecDeque<ArchiveStorageQuery<Vec<u8>>>,
    // Current state; `None` means "pick the next query off the queue".
    state: Option<StreamState<T>>,
}
/// Where we are in the lifecycle of the query currently being executed.
enum StreamState<T: Config> {
    /// Waiting for the RPC call to hand us back a storage subscription.
    GetSubscription {
        current_query: ArchiveStorageQuery<Vec<u8>>,
        sub_fut: Pin<Box<dyn Future<Output = Result<ArchiveStorageSubscription<HashFor<T>>, RpcError>> + Send + 'static>>
    },
    /// Draining items from the open subscription.
    RunSubscription {
        current_query: ArchiveStorageQuery<Vec<u8>>,
        sub: ArchiveStorageSubscription<HashFor<T>>
    },
}
impl<T: Config> ArchiveStorageStream<T> {
    /// Construct a new [`ArchiveStorageStream`] which will run each of the
    /// given queries in order against the block `at`, yielding the items
    /// returned for each.
    pub fn new(
        at: HashFor<T>,
        methods: ArchiveRpcMethods<RpcConfigFor<T>>,
        query_queue: VecDeque<ArchiveStorageQuery<Vec<u8>>>,
    ) -> Self {
        Self {
            at,
            methods,
            query_queue,
            state: None,
        }
    }
}
// The inner future/subscription are only ever polled through their own
// boxed `Pin`s, so the stream itself never needs structural pinning.
// NOTE(review): this unconditional impl assumes `HashFor<T>` is a plain
// movable value — confirm.
impl<T: Config> std::marker::Unpin for ArchiveStorageStream<T> {}
impl<T: Config> Stream for ArchiveStorageStream<T> {
    type Item = Result<ArchiveStorageEventItem<HashFor<T>>, BackendError>;
    // A small state machine: each loop iteration inspects the current state
    // and either transitions to a new one (`continue`) or returns.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let mut this = self.as_mut();
        loop {
            match this.state.take() {
                // No state yet so initialise!
                None => {
                    // Nothing left; we're done.
                    let Some(query) = this.query_queue.pop_front() else {
                        return Poll::Ready(None);
                    };
                    let at = this.at;
                    let methods = this.methods.clone();
                    // Keep an owned copy of the query so that it can be
                    // re-queued and retried if the connection drops.
                    let current_query = query.clone();
                    let sub_fut = async move {
                        let query = std::iter::once(ArchiveStorageQuery {
                            key: query.key.as_ref(),
                            query_type: query.query_type,
                            pagination_start_key: query.pagination_start_key.as_deref(),
                        });
                        methods.archive_v1_storage(
                            at,
                            query,
                            None
                        ).await
                    };
                    this.state = Some(StreamState::GetSubscription {
                        current_query,
                        sub_fut: Box::pin(sub_fut)
                    });
                },
                // We're getting our subscription stream for the current query.
                Some(StreamState::GetSubscription { current_query, mut sub_fut }) => {
                    match sub_fut.poll_unpin(cx) {
                        Poll::Ready(Ok(sub)) => {
                            this.state = Some(StreamState::RunSubscription {
                                current_query,
                                sub
                            });
                        },
                        Poll::Ready(Err(e)) => {
                            if e.is_disconnected_will_reconnect() {
                                // Push the query back onto the queue to try again
                                this.query_queue.push_front(current_query);
                                continue;
                            }
                            // Any other error ends the whole stream.
                            this.state = None;
                            return Poll::Ready(Some(Err(e.into())))
                        }
                        Poll::Pending => {
                            // Not ready yet; restore the state we took.
                            this.state = Some(StreamState::GetSubscription {
                                current_query,
                                sub_fut
                            });
                            return Poll::Pending
                        },
                    }
                },
                // Running the subscription and returning results.
                Some(StreamState::RunSubscription { current_query, mut sub }) => {
                    match sub.poll_next_unpin(cx) {
                        Poll::Ready(Some(Ok(val))) => {
                            match val {
                                ArchiveStorageEvent::Item(item) => {
                                    this.state = Some(StreamState::RunSubscription {
                                        current_query: ArchiveStorageQuery {
                                            key: current_query.key,
                                            query_type: current_query.query_type,
                                            // In the event of error, we resume from the last seen value.
                                            // At the time of writing, it's not clear if paginationStartKey
                                            // starts from the key itself or the first key after it:
                                            // https://github.com/paritytech/json-rpc-interface-spec/issues/176
                                            pagination_start_key: Some(item.key.0.clone())
                                        },
                                        sub
                                    });
                                    // We treat `paginationStartKey` as being the key we want results to begin _after_.
                                    // So, if we see a value that's <= it, ignore the value.
                                    let ignore_this_value = current_query
                                        .pagination_start_key
                                        .as_ref()
                                        .is_some_and(|k| item.key.0.cmp(k).is_le());
                                    if ignore_this_value {
                                        continue;
                                    }
                                    return Poll::Ready(Some(Ok(item)));
                                },
                                ArchiveStorageEvent::Error(e) => {
                                    this.state = None;
                                    return Poll::Ready(Some(Err(BackendError::Other(e.error))))
                                },
                                ArchiveStorageEvent::Done => {
                                    // This query finished; loop round to start the next.
                                    this.state = None;
                                    continue;
                                },
                            }
                        },
                        Poll::Ready(Some(Err(e))) => {
                            if e.is_disconnected_will_reconnect() {
                                // Put the current query back into the queue and retry.
                                // We've been keeping it uptodate as needed.
                                this.query_queue.push_front(current_query);
                                this.state = None;
                                continue;
                            }
                            this.state = None;
                            return Poll::Ready(Some(Err(e.into())));
                        }
                        Poll::Ready(None) => {
                            // Subscription ended without a `Done` event; move on
                            // to the next query.
                            this.state = None;
                            continue;
                        }
                        Poll::Pending => {
                            return Poll::Pending
                        },
                    }
                },
            }
        }
    }
}
+789
View File
@@ -0,0 +1,789 @@
// Copyright 2019-2025 Parity Technologies (UK) Ltd.
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
// see LICENSE for license details.
//! This module exposes a backend implementation based on the new APIs
//! described at <https://github.com/paritytech/json-rpc-interface-spec/>. See
//! [`rpc_methods`] for the raw API calls.
//!
//! Specifically, the focus here is on the `chainHead` methods.
//!
//! # Warning
//!
//! Everything in this module is **unstable**, meaning that it could change without
//! warning at any time.
mod follow_stream;
mod follow_stream_driver;
mod follow_stream_unpin;
mod storage_items;
use self::follow_stream_driver::FollowStreamFinalizedHeads;
use crate::backend::{
Backend, BlockRef, BlockRefT, StorageResponse, StreamOf, StreamOfResults,
TransactionStatus, utils::retry,
};
use crate::config::{Config, Hash, HashFor, RpcConfigFor};
use crate::error::{BackendError, RpcError};
use async_trait::async_trait;
use follow_stream_driver::{FollowStreamDriver, FollowStreamDriverHandle};
use futures::future::Either;
use futures::{Stream, StreamExt};
use std::collections::HashMap;
use std::task::Poll;
use storage_items::StorageItems;
use subxt_rpcs::RpcClient;
use subxt_rpcs::methods::chain_head::{
FollowEvent, MethodResponse, StorageQuery, StorageQueryType, StorageResultType,
};
/// Re-export RPC types and methods from [`subxt_rpcs::methods::chain_head`].
pub mod rpc_methods {
pub use subxt_rpcs::methods::chain_head::*;
}
// Expose the RPC methods.
pub use subxt_rpcs::methods::chain_head::ChainHeadRpcMethods;
/// Configure and build an [`ChainHeadBackend`].
pub struct ChainHeadBackendBuilder<T> {
    // See `max_block_life()` for what this controls.
    max_block_life: usize,
    // See `transaction_timeout()`.
    transaction_timeout_secs: usize,
    // See `submit_transactions_ignoring_follow_events()`.
    submit_transactions_ignoring_follow_events: bool,
    // The builder stores no `T` value; this just ties it to a config type.
    _marker: std::marker::PhantomData<T>,
}
impl<T: Config> Default for ChainHeadBackendBuilder<T> {
    fn default() -> Self {
        Self::new()
    }
}
impl<T: Config> ChainHeadBackendBuilder<T> {
    /// Create a new [`ChainHeadBackendBuilder`].
    pub fn new() -> Self {
        Self {
            // Defaults: never auto-unpin blocks, 240s transaction timeout, and
            // keep transaction events synchronized with chainHead_follow.
            max_block_life: usize::MAX,
            transaction_timeout_secs: 240,
            submit_transactions_ignoring_follow_events: false,
            _marker: std::marker::PhantomData,
        }
    }
    /// The age of a block is defined here as the difference between the current finalized block number
    /// and the block number of a given block. Once the difference equals or exceeds the number given
    /// here, the block is unpinned.
    ///
    /// By default, we will never automatically unpin blocks, but if the number of pinned blocks that we
    /// keep hold of exceeds the number that the server can tolerate, then a `stop` event is generated and
    /// we are forced to resubscribe, losing any pinned blocks.
    pub fn max_block_life(mut self, max_block_life: usize) -> Self {
        self.max_block_life = max_block_life;
        self
    }
    /// When a transaction is submitted, we wait for events indicating it's successfully made it into a finalized
    /// block. If it takes too long for this to happen, we assume that something went wrong and that we should
    /// give up waiting.
    ///
    /// Provide a value here to denote how long, in seconds, to wait before giving up. Defaults to 240 seconds.
    ///
    /// If [`Self::submit_transactions_ignoring_follow_events()`] is called, this timeout is ignored.
    pub fn transaction_timeout(mut self, timeout_secs: usize) -> Self {
        self.transaction_timeout_secs = timeout_secs;
        self
    }
    /// When a transaction is submitted, we normally synchronize the events that we get back with events from
    /// our background `chainHead_follow` subscription, to ensure that any blocks hashes that we see can be
    /// immediately queried (for example to get events or state at that block), and are kept around unless they
    /// are no longer needed.
    ///
    /// The main downside of this synchronization is that there may be a delay in being handed back a
    /// [`TransactionStatus::InFinalizedBlock`] event while we wait to see the same block hash emitted from
    /// our background `chainHead_follow` subscription in order to ensure it's available for querying.
    ///
    /// Calling this method turns off this synchronization, speeding up the response and removing any reliance
    /// on the `chainHead_follow` subscription continuing to run without stopping throughout submitting a transaction.
    ///
    /// # Warning
    ///
    /// This can lead to errors when calling APIs like `wait_for_finalized_success`, which will try to retrieve events
    /// at the finalized block, because there will be a race and the finalized block may not be available for querying
    /// yet.
    pub fn submit_transactions_ignoring_follow_events(mut self) -> Self {
        self.submit_transactions_ignoring_follow_events = true;
        self
    }
    /// A low-level API to build the backend and driver which requires polling the driver for the backend
    /// to make progress.
    ///
    /// This is useful if you want to manage the driver yourself, for example if you want to run it in on
    /// a specific runtime.
    ///
    /// If you just want to run the driver in the background until completion in on the default runtime,
    /// use [`ChainHeadBackendBuilder::build_with_background_driver`] instead.
    pub fn build(
        self,
        client: impl Into<RpcClient>,
    ) -> (ChainHeadBackend<T>, ChainHeadBackendDriver<T>) {
        // Construct the underlying follow_stream layers:
        // raw follow events -> auto-unpinning wrapper -> driver/handle split.
        let rpc_methods = ChainHeadRpcMethods::new(client.into());
        let follow_stream =
            follow_stream::FollowStream::<HashFor<T>>::from_methods(rpc_methods.clone());
        let follow_stream_unpin =
            follow_stream_unpin::FollowStreamUnpin::<HashFor<T>>::from_methods(
                follow_stream,
                rpc_methods.clone(),
                self.max_block_life,
            );
        let follow_stream_driver = FollowStreamDriver::new(follow_stream_unpin);
        // Wrap these into the backend and driver that we'll expose.
        let backend = ChainHeadBackend {
            methods: rpc_methods,
            follow_handle: follow_stream_driver.handle(),
            transaction_timeout_secs: self.transaction_timeout_secs,
            submit_transactions_ignoring_follow_events: self
                .submit_transactions_ignoring_follow_events,
        };
        let driver = ChainHeadBackendDriver {
            driver: follow_stream_driver,
        };
        (backend, driver)
    }
    /// An API to build the backend and driver which will run in the background until completion
    /// on the default runtime.
    ///
    /// - On non-wasm targets, this will spawn the driver on `tokio`.
    /// - On wasm targets, this will spawn the driver on `wasm-bindgen-futures`.
    #[cfg(feature = "runtime")]
    pub fn build_with_background_driver(self, client: impl Into<RpcClient>) -> ChainHeadBackend<T> {
        fn spawn<F: std::future::Future + Send + 'static>(future: F) {
            // NOTE(review): these two cfg gates don't partition all targets —
            // a wasm target other than wasm32-unknown-unknown matches neither
            // arm, and the future would then be silently dropped. Confirm
            // whether that is acceptable.
            #[cfg(not(target_family = "wasm"))]
            tokio::spawn(async move {
                future.await;
            });
            #[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
            wasm_bindgen_futures::spawn_local(async move {
                future.await;
            });
        }
        let (backend, mut driver) = self.build(client);
        spawn(async move {
            // NOTE: we need to poll the driver until it's done i.e returns None
            // to ensure that the backend is shutdown properly.
            while let Some(res) = driver.next().await {
                if let Err(err) = res {
                    tracing::debug!(target: "subxt", "chainHead backend error={err}");
                }
            }
            tracing::debug!(target: "subxt", "chainHead backend was closed");
        });
        backend
    }
}
/// Driver for the [`ChainHeadBackend`]. This must be polled in order for the
/// backend to make progress.
#[derive(Debug)]
pub struct ChainHeadBackendDriver<T: Config> {
    driver: FollowStreamDriver<HashFor<T>>,
}
impl<T: Config> Stream for ChainHeadBackendDriver<T> {
    type Item = <FollowStreamDriver<HashFor<T>> as Stream>::Item;
    // Delegate straight to the inner follow-stream driver.
    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        self.driver.poll_next_unpin(cx)
    }
}
/// The chainHead backend.
///
/// Construct one via [`ChainHeadBackend::builder()`].
#[derive(Debug, Clone)]
pub struct ChainHeadBackend<T: Config> {
    // RPC methods we'll want to call:
    methods: ChainHeadRpcMethods<RpcConfigFor<T>>,
    // A handle to the chainHead_follow subscription:
    follow_handle: FollowStreamDriverHandle<HashFor<T>>,
    // How long to wait until giving up on transactions:
    transaction_timeout_secs: usize,
    // Don't synchronise blocks with chainHead_follow when submitting txs:
    submit_transactions_ignoring_follow_events: bool,
}
impl<T: Config> ChainHeadBackend<T> {
    /// Configure and construct an [`ChainHeadBackend`] and the associated [`ChainHeadBackendDriver`].
    pub fn builder() -> ChainHeadBackendBuilder<T> {
        ChainHeadBackendBuilder::new()
    }
    /// Stream block headers based on the provided filter fn.
    ///
    /// `f` maps each follow event to the block refs whose headers should be
    /// fetched and yielded.
    async fn stream_headers<F>(
        &self,
        f: F,
    ) -> Result<StreamOfResults<(T::Header, BlockRef<HashFor<T>>)>, BackendError>
    where
        F: Fn(
                FollowEvent<follow_stream_unpin::BlockRef<HashFor<T>>>,
            ) -> Vec<follow_stream_unpin::BlockRef<HashFor<T>>>
            + Send
            + Sync
            + 'static,
    {
        let methods = self.methods.clone();
        let headers =
            FollowStreamFinalizedHeads::new(self.follow_handle.subscribe(), f).flat_map(move |r| {
                let methods = methods.clone();
                let (sub_id, block_refs) = match r {
                    Ok(ev) => ev,
                    // Surface the error as a one-item stream.
                    Err(e) => return Either::Left(futures::stream::once(async { Err(e) })),
                };
                // Fetch the header for each block ref and pair them up.
                Either::Right(
                    futures::stream::iter(block_refs).filter_map(move |block_ref| {
                        let methods = methods.clone();
                        let sub_id = sub_id.clone();
                        async move {
                            // The `transpose()?` silently skips blocks whose
                            // header is no longer available (Ok(None)) rather
                            // than erroring.
                            let res = methods
                                .chainhead_v1_header(&sub_id, block_ref.hash())
                                .await
                                .transpose()?;
                            let header = match res {
                                Ok(header) => header,
                                Err(e) => return Some(Err(e.into())),
                            };
                            Some(Ok((header, block_ref.into())))
                        }
                    }),
                )
            });
        Ok(StreamOf(Box::pin(headers)))
    }
}
// Allow the unpin-tracking block refs to be used where the backend expects a `BlockRef`.
impl<H: Hash + 'static> BlockRefT for follow_stream_unpin::BlockRef<H> {}
impl<H: Hash + 'static> From<follow_stream_unpin::BlockRef<H>> for BlockRef<H> {
    fn from(b: follow_stream_unpin::BlockRef<H>) -> Self {
        // Store the hash alongside the original ref; holding the ref presumably
        // keeps the block pinned on the node for as long as it's alive — see
        // `follow_stream_unpin` for the pinning logic.
        BlockRef::new(b.hash(), b)
    }
}
// Marker impl permitting `ChainHeadBackend` to implement the sealed `Backend` trait.
impl<T: Config> super::sealed::Sealed for ChainHeadBackend<T> {}
#[async_trait]
impl<T: Config> Backend<T> for ChainHeadBackend<T> {
    // Fetch the values at the given keys, emitting one response per key that
    // actually has a value. Wrapped in `retry` to survive transient RPC errors.
    async fn storage_fetch_values(
        &self,
        keys: Vec<Vec<u8>>,
        at: HashFor<T>,
    ) -> Result<StreamOfResults<StorageResponse>, BackendError> {
        retry(|| async {
            let queries = keys.iter().map(|key| StorageQuery {
                key: &**key,
                query_type: StorageQueryType::Value,
            });
            let storage_items =
                StorageItems::from_methods(queries, at, &self.follow_handle, self.methods.clone())
                    .await?;
            let stream = storage_items.filter_map(async |val| {
                let val = match val {
                    Ok(val) => val,
                    Err(e) => return Some(Err(e)),
                };
                // Skip anything that isn't a plain value result.
                let StorageResultType::Value(result) = val.result else {
                    return None;
                };
                Some(Ok(StorageResponse {
                    key: val.key.0,
                    value: result.0,
                }))
            });
            Ok(StreamOf(Box::pin(stream)))
        })
        .await
    }
    // Stream the keys of all storage entries under the given key prefix.
    async fn storage_fetch_descendant_keys(
        &self,
        key: Vec<u8>,
        at: HashFor<T>,
    ) -> Result<StreamOfResults<Vec<u8>>, BackendError> {
        retry(|| async {
            // Ask for hashes, and then just ignore them and return the keys that come back.
            let query = StorageQuery {
                key: &*key,
                query_type: StorageQueryType::DescendantsHashes,
            };
            let storage_items = StorageItems::from_methods(
                std::iter::once(query),
                at,
                &self.follow_handle,
                self.methods.clone(),
            )
            .await?;
            let storage_result_stream = storage_items.map(|val| val.map(|v| v.key.0));
            Ok(StreamOf(Box::pin(storage_result_stream)))
        })
        .await
    }
    // Stream key/value pairs of all storage entries under the given key prefix.
    async fn storage_fetch_descendant_values(
        &self,
        key: Vec<u8>,
        at: HashFor<T>,
    ) -> Result<StreamOfResults<StorageResponse>, BackendError> {
        retry(|| async {
            let query = StorageQuery {
                key: &*key,
                query_type: StorageQueryType::DescendantsValues,
            };
            let storage_items = StorageItems::from_methods(
                std::iter::once(query),
                at,
                &self.follow_handle,
                self.methods.clone(),
            )
            .await?;
            let storage_result_stream = storage_items.filter_map(async |val| {
                let val = match val {
                    Ok(val) => val,
                    Err(e) => return Some(Err(e)),
                };
                // Skip anything that isn't a plain value result.
                let StorageResultType::Value(result) = val.result else {
                    return None;
                };
                Some(Ok(StorageResponse {
                    key: val.key.0,
                    value: result.0,
                }))
            });
            Ok(StreamOf(Box::pin(storage_result_stream)))
        })
        .await
    }
    // Fetch the genesis hash via the chainSpec methods.
    async fn genesis_hash(&self) -> Result<HashFor<T>, BackendError> {
        retry(|| async {
            let genesis_hash = self.methods.chainspec_v1_genesis_hash().await?;
            Ok(genesis_hash)
        })
        .await
    }
    // Fetch the header for a (pinned) block hash; `None` if not available.
    async fn block_header(&self, at: HashFor<T>) -> Result<Option<T::Header>, BackendError> {
        retry(|| async {
            let sub_id = get_subscription_id(&self.follow_handle).await?;
            let header = self.methods.chainhead_v1_header(&sub_id, at).await?;
            Ok(header)
        })
        .await
    }
    // Fetch the extrinsics in the given block. Returns `None` if the follow
    // event stream ends before the matching OperationBodyDone event arrives.
    async fn block_body(&self, at: HashFor<T>) -> Result<Option<Vec<Vec<u8>>>, BackendError> {
        retry(|| async {
            let sub_id = get_subscription_id(&self.follow_handle).await?;
            // Subscribe to the body response and get our operationId back.
            let follow_events = self.follow_handle.subscribe().events();
            let status = self.methods.chainhead_v1_body(&sub_id, at).await?;
            let operation_id = match status {
                MethodResponse::LimitReached => return Err(RpcError::LimitReached.into()),
                MethodResponse::Started(s) => s.operation_id,
            };
            // Wait for the response to come back with the correct operationId.
            let mut exts_stream = follow_events.filter_map(|ev| {
                let FollowEvent::OperationBodyDone(body) = ev else {
                    return std::future::ready(None);
                };
                if body.operation_id != operation_id {
                    return std::future::ready(None);
                }
                let exts: Vec<_> = body.value.into_iter().map(|ext| ext.0).collect();
                std::future::ready(Some(exts))
            });
            Ok(exts_stream.next().await)
        })
        .await
    }
    // Wait for the next `Initialized` event on a fresh subscription and hand
    // back the most recent finalized block ref it reports.
    async fn latest_finalized_block_ref(&self) -> Result<BlockRef<HashFor<T>>, BackendError> {
        let next_ref: Option<BlockRef<HashFor<T>>> = self
            .follow_handle
            .subscribe()
            .events()
            .filter_map(|ev| {
                let out = match ev {
                    FollowEvent::Initialized(init) => {
                        init.finalized_block_hashes.last().map(|b| b.clone().into())
                    }
                    _ => None,
                };
                std::future::ready(out)
            })
            .next()
            .await;
        next_ref.ok_or_else(|| RpcError::SubscriptionDropped.into())
    }
    // Stream headers for every new block (plus the initial finalized ones).
    async fn stream_all_block_headers(
        &self,
        _hasher: T::Hasher,
    ) -> Result<StreamOfResults<(T::Header, BlockRef<HashFor<T>>)>, BackendError> {
        // TODO: https://github.com/paritytech/subxt/issues/1568
        //
        // It's possible that blocks may be silently missed if
        // a reconnection occurs because it's restarted by the unstable backend.
        self.stream_headers(|ev| match ev {
            FollowEvent::Initialized(init) => init.finalized_block_hashes,
            FollowEvent::NewBlock(ev) => {
                vec![ev.block_hash]
            }
            _ => vec![],
        })
        .await
    }
    // Stream headers for each new best block (plus the initial finalized ones).
    async fn stream_best_block_headers(
        &self,
        _hasher: T::Hasher,
    ) -> Result<StreamOfResults<(T::Header, BlockRef<HashFor<T>>)>, BackendError> {
        // TODO: https://github.com/paritytech/subxt/issues/1568
        //
        // It's possible that blocks may be silently missed if
        // a reconnection occurs because it's restarted by the unstable backend.
        self.stream_headers(|ev| match ev {
            FollowEvent::Initialized(init) => init.finalized_block_hashes,
            FollowEvent::BestBlockChanged(ev) => vec![ev.best_block_hash],
            _ => vec![],
        })
        .await
    }
    // Stream headers for each finalized block (plus the initial finalized ones).
    async fn stream_finalized_block_headers(
        &self,
        _hasher: T::Hasher,
    ) -> Result<StreamOfResults<(T::Header, BlockRef<HashFor<T>>)>, BackendError> {
        self.stream_headers(|ev| match ev {
            FollowEvent::Initialized(init) => init.finalized_block_hashes,
            FollowEvent::Finalized(ev) => ev.finalized_block_hashes,
            _ => vec![],
        })
        .await
    }
    // Submit a transaction, either tracking chainHead_follow events (so that
    // returned block hashes are pinned) or ignoring them, per configuration.
    async fn submit_transaction(
        &self,
        extrinsic: &[u8],
    ) -> Result<StreamOfResults<TransactionStatus<HashFor<T>>>, BackendError> {
        if self.submit_transactions_ignoring_follow_events {
            submit_transaction_ignoring_follow_events(extrinsic, &self.methods).await
        } else {
            submit_transaction_tracking_follow_events::<T>(
                extrinsic,
                self.transaction_timeout_secs as u64,
                &self.methods,
                &self.follow_handle,
            )
            .await
        }
    }
    // Make a runtime API call at the given (pinned) block and return the raw
    // SCALE-encoded result bytes.
    async fn call(
        &self,
        method: &str,
        call_parameters: Option<&[u8]>,
        at: HashFor<T>,
    ) -> Result<Vec<u8>, BackendError> {
        retry(|| async {
            let sub_id = get_subscription_id(&self.follow_handle).await?;
            // Subscribe to the body response and get our operationId back.
            let follow_events = self.follow_handle.subscribe().events();
            let call_parameters = call_parameters.unwrap_or(&[]);
            let status = self
                .methods
                .chainhead_v1_call(&sub_id, at, method, call_parameters)
                .await?;
            let operation_id = match status {
                MethodResponse::LimitReached => return Err(RpcError::LimitReached.into()),
                MethodResponse::Started(s) => s.operation_id,
            };
            // Wait for the response to come back with the correct operationId.
            let mut call_data_stream = follow_events.filter_map(|ev| {
                let FollowEvent::OperationCallDone(body) = ev else {
                    return std::future::ready(None);
                };
                if body.operation_id != operation_id {
                    return std::future::ready(None);
                }
                std::future::ready(Some(body.output.0))
            });
            call_data_stream
                .next()
                .await
                .ok_or_else(|| RpcError::SubscriptionDropped.into())
        })
        .await
    }
}
/// A helper to obtain a subscription ID.
///
/// Errors with [`RpcError::SubscriptionDropped`] if no ID is available
/// (i.e. the underlying subscription has gone away).
async fn get_subscription_id<H: Hash>(
    follow_handle: &FollowStreamDriverHandle<H>,
) -> Result<String, BackendError> {
    follow_handle
        .subscribe()
        .subscription_id()
        .await
        .ok_or_else(|| RpcError::SubscriptionDropped.into())
}
// Submit a transaction. This makes no attempt to sync with follow events,
// This is used in the archive backend too.
pub(crate) async fn submit_transaction_ignoring_follow_events<T: Config>(
extrinsic: &[u8],
methods: &ChainHeadRpcMethods<RpcConfigFor<T>>,
) -> Result<StreamOfResults<TransactionStatus<HashFor<T>>>, BackendError> {
let tx_progress = methods
.transactionwatch_v1_submit_and_watch(extrinsic)
.await?
.map(|ev| {
ev.map(|tx_status| {
use subxt_rpcs::methods::chain_head::TransactionStatus as RpcTransactionStatus;
match tx_status {
RpcTransactionStatus::Validated => TransactionStatus::Validated,
RpcTransactionStatus::Broadcasted => TransactionStatus::Broadcasted,
RpcTransactionStatus::BestChainBlockIncluded { block: None } => {
TransactionStatus::NoLongerInBestBlock
},
RpcTransactionStatus::BestChainBlockIncluded { block: Some(block) } => {
TransactionStatus::InBestBlock { hash: BlockRef::from_hash(block.hash) }
},
RpcTransactionStatus::Finalized { block } => {
TransactionStatus::InFinalizedBlock { hash: BlockRef::from_hash(block.hash) }
},
RpcTransactionStatus::Error { error } => {
TransactionStatus::Error { message: error }
},
RpcTransactionStatus::Invalid { error } => {
TransactionStatus::Invalid { message: error }
},
RpcTransactionStatus::Dropped { error } => {
TransactionStatus::Dropped { message: error }
},
}
}).map_err(Into::into)
});
Ok(StreamOf(Box::pin(tx_progress)))
}
// Submit a transaction. This synchronizes with chainHead_follow events to ensure
// that block hashes returned are ready to be queried.
async fn submit_transaction_tracking_follow_events<T: Config>(
    extrinsic: &[u8],
    transaction_timeout_secs: u64,
    methods: &ChainHeadRpcMethods<RpcConfigFor<T>>,
    follow_handle: &FollowStreamDriverHandle<HashFor<T>>,
) -> Result<StreamOfResults<TransactionStatus<HashFor<T>>>, BackendError> {
    // We care about new and finalized block hashes.
    enum SeenBlockMarker {
        New,
        Finalized,
    }
    // First, subscribe to new blocks.
    let mut seen_blocks_sub = follow_handle.subscribe().events();
    // Then, submit the transaction.
    let mut tx_progress = methods
        .transactionwatch_v1_submit_and_watch(extrinsic)
        .await?;
    // Blocks seen on the follow subscription, keyed by hash, with a marker
    // recording whether we saw them as new or finalized.
    let mut seen_blocks = HashMap::new();
    let mut done = false;
    // If we see the finalized event, we start waiting until we find a finalized block that
    // matches, so we can guarantee to return a pinned block hash and be properly in sync
    // with chainHead_follow.
    let mut finalized_hash: Option<HashFor<T>> = None;
    // Record the start time so that we can time out if things appear to take too long.
    let start_instant = web_time::Instant::now();
    // A quick helper to return a generic error.
    let err_other = |s: &str| Some(Err(BackendError::Other(s.into())));
    // Now we can attempt to associate tx events with pinned blocks.
    let tx_stream = futures::stream::poll_fn(move |cx| {
        loop {
            // Bail early if we're finished; nothing else to do.
            if done {
                return Poll::Ready(None);
            }
            // Bail if we exceed the configured transaction timeout; something
            // very likely went wrong.
            if start_instant.elapsed().as_secs() > transaction_timeout_secs {
                return Poll::Ready(err_other(
                    "Timeout waiting for the transaction to be finalized",
                ));
            }
            // Poll for a follow event, and error if the stream has unexpectedly ended.
            let follow_ev_poll = match seen_blocks_sub.poll_next_unpin(cx) {
                Poll::Ready(None) => {
                    return Poll::Ready(err_other(
                        "chainHead_follow stream ended unexpectedly",
                    ));
                }
                Poll::Ready(Some(follow_ev)) => Poll::Ready(follow_ev),
                Poll::Pending => Poll::Pending,
            };
            let follow_ev_is_pending = follow_ev_poll.is_pending();
            // If there was a follow event, then handle it and loop around to see if there are more.
            // We want to buffer follow events until we hit Pending, so that we are as up-to-date as possible
            // for when we see a BestBlockChanged event, so that we have the best change of already having
            // seen the block that it mentions and returning a proper pinned block.
            if let Poll::Ready(follow_ev) = follow_ev_poll {
                match follow_ev {
                    FollowEvent::NewBlock(ev) => {
                        // Optimization: once we have a `finalized_hash`, we only care about finalized
                        // block refs now and can avoid bothering to save new blocks.
                        if finalized_hash.is_none() {
                            seen_blocks.insert(
                                ev.block_hash.hash(),
                                (SeenBlockMarker::New, ev.block_hash),
                            );
                        }
                    }
                    FollowEvent::Finalized(ev) => {
                        for block_ref in ev.finalized_block_hashes {
                            seen_blocks.insert(
                                block_ref.hash(),
                                (SeenBlockMarker::Finalized, block_ref),
                            );
                        }
                    }
                    FollowEvent::Stop => {
                        // If we get this event, we'll lose all of our existing pinned blocks and have a gap
                        // in which we may lose the finalized block that the TX is in. For now, just error if
                        // this happens, to prevent the case in which we never see a finalized block and wait
                        // forever.
                        return Poll::Ready(err_other(
                            "chainHead_follow emitted 'stop' event during transaction submission",
                        ));
                    }
                    _ => {}
                }
                continue;
            }
            // If we have a finalized hash, we are done looking for tx events and we are just waiting
            // for a pinned block with a matching hash (which must appear eventually given it's finalized).
            if let Some(hash) = &finalized_hash {
                if let Some((SeenBlockMarker::Finalized, block_ref)) =
                    seen_blocks.remove(hash)
                {
                    // Found it! Hand back the event with a pinned block. We're done.
                    done = true;
                    let ev = TransactionStatus::InFinalizedBlock {
                        hash: block_ref.into(),
                    };
                    return Poll::Ready(Some(Ok(ev)));
                } else {
                    // Not found it! If follow ev is pending, then return pending here and wait for
                    // a new one to come in, else loop around and see if we get another one immediately.
                    seen_blocks.clear();
                    if follow_ev_is_pending {
                        return Poll::Pending;
                    } else {
                        continue;
                    }
                }
            }
            // If we don't have a finalized block yet, we keep polling for tx progress events.
            let tx_progress_ev = match tx_progress.poll_next_unpin(cx) {
                Poll::Pending => return Poll::Pending,
                Poll::Ready(None) => {
                    return Poll::Ready(err_other(
                        "No more transaction progress events, but we haven't seen a Finalized one yet",
                    ));
                }
                Poll::Ready(Some(Err(e))) => return Poll::Ready(Some(Err(e.into()))),
                Poll::Ready(Some(Ok(ev))) => ev,
            };
            // When we get one, map it to the correct format (or for finalized ev, wait for the pinned block):
            use subxt_rpcs::methods::chain_head::TransactionStatus as RpcTransactionStatus;
            let tx_progress_ev = match tx_progress_ev {
                RpcTransactionStatus::Finalized { block } => {
                    // We'll wait until we have seen this hash, to try to guarantee
                    // that when we return this event, the corresponding block is
                    // pinned and accessible.
                    finalized_hash = Some(block.hash);
                    continue;
                }
                RpcTransactionStatus::BestChainBlockIncluded { block: Some(block) } => {
                    // Look up a pinned block ref if we can, else return a non-pinned
                    // block that likely isn't accessible. We have no guarantee that a best
                    // block on the node a tx was sent to will ever be known about on the
                    // chainHead_follow subscription.
                    let block_ref = match seen_blocks.get(&block.hash) {
                        Some((_, block_ref)) => block_ref.clone().into(),
                        None => BlockRef::from_hash(block.hash),
                    };
                    TransactionStatus::InBestBlock { hash: block_ref }
                }
                RpcTransactionStatus::BestChainBlockIncluded { block: None } => {
                    TransactionStatus::NoLongerInBestBlock
                }
                RpcTransactionStatus::Broadcasted => TransactionStatus::Broadcasted,
                RpcTransactionStatus::Dropped { error, .. } => {
                    TransactionStatus::Dropped { message: error }
                }
                RpcTransactionStatus::Error { error } => {
                    TransactionStatus::Error { message: error }
                }
                RpcTransactionStatus::Invalid { error } => {
                    TransactionStatus::Invalid { message: error }
                }
                RpcTransactionStatus::Validated => TransactionStatus::Validated,
            };
            return Poll::Ready(Some(Ok(tx_progress_ev)));
        }
    });
    Ok(StreamOf(Box::pin(tx_stream)))
}
+336
View File
@@ -0,0 +1,336 @@
// Copyright 2019-2025 Parity Technologies (UK) Ltd.
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
// see LICENSE for license details.
use crate::config::{Config, HashFor, RpcConfigFor};
use crate::error::BackendError;
use futures::{FutureExt, Stream, StreamExt, TryStreamExt};
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use subxt_rpcs::methods::chain_head::{ChainHeadRpcMethods, FollowEvent};
/// A `Stream` whose goal is to remain subscribed to `chainHead_follow`. It will re-subscribe if the subscription
/// is ended for any reason, and it will return the current `subscription_id` as an event, along with the other
/// follow events.
pub struct FollowStream<Hash> {
    // Using this and not just keeping a copy of the RPC methods
    // around means that we can test this in isolation with dummy streams.
    stream_getter: FollowEventStreamGetter<Hash>,
    // Current state of the (re)subscription state machine.
    stream: InnerStreamState<Hash>,
}
impl<Hash> std::fmt::Debug for FollowStream<Hash> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // The getter is an opaque closure, so render a ".." placeholder for it.
        let mut builder = f.debug_struct("FollowStream");
        builder.field("stream_getter", &"..");
        builder.field("stream", &self.stream);
        builder.finish()
    }
}
/// A getter function that returns an [`FollowEventStreamFut<Hash>`].
/// Called whenever the subscription needs to be (re)established.
pub type FollowEventStreamGetter<Hash> = Box<dyn FnMut() -> FollowEventStreamFut<Hash> + Send>;
/// The future which will return a stream of follow events and the subscription ID for it.
pub type FollowEventStreamFut<Hash> = Pin<
    Box<
        dyn Future<Output = Result<(FollowEventStream<Hash>, String), BackendError>>
            + Send
            + 'static,
    >,
>;
/// The stream of follow events.
pub type FollowEventStream<Hash> =
    Pin<Box<dyn Stream<Item = Result<FollowEvent<Hash>, BackendError>> + Send + 'static>>;
/// Either a ready message with the current subscription ID, or
/// an event from the stream itself. A `Ready` message is emitted each
/// time the underlying subscription is (re)established.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum FollowStreamMsg<Hash> {
    /// The stream is ready (and has a subscription ID)
    Ready(String),
    /// An event from the stream.
    Event(FollowEvent<Hash>),
}
impl<Hash> FollowStreamMsg<Hash> {
    /// Return an event, or none if the message is a "ready" one.
    pub fn into_event(self) -> Option<FollowEvent<Hash>> {
        if let FollowStreamMsg::Event(ev) = self {
            Some(ev)
        } else {
            None
        }
    }
}
/// State machine for [`FollowStream`]: New -> Initializing -> Ready ->
/// ReceivingEvents; back through Stopped -> New on restart, or to
/// Finished on a fatal error (see the `Stream` impl for the transitions).
enum InnerStreamState<Hash> {
    /// We've just created the stream; we'll start Initializing it
    New,
    /// We're fetching the inner subscription. Move to Ready when we have one.
    Initializing(FollowEventStreamFut<Hash>),
    /// Report back the subscription ID here, and then start ReceivingEvents.
    Ready(Option<(FollowEventStream<Hash>, String)>),
    /// We are polling for, and receiving events from the stream.
    ReceivingEvents(FollowEventStream<Hash>),
    /// We received a stop event. We'll send one on and restart the stream.
    Stopped,
    /// The stream is finished and will not restart (likely due to an error).
    Finished,
}
impl<Hash> std::fmt::Debug for InnerStreamState<Hash> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // None of the payloads are printable, so just name the variant.
        let name = match self {
            Self::New => "New",
            Self::Initializing(_) => "Initializing(..)",
            Self::Ready(_) => "Ready(..)",
            Self::ReceivingEvents(_) => "ReceivingEvents(..)",
            Self::Stopped => "Stopped",
            Self::Finished => "Finished",
        };
        f.write_str(name)
    }
}
impl<Hash> FollowStream<Hash> {
/// Create a new [`FollowStream`] given a function which returns the stream.
pub fn new(stream_getter: FollowEventStreamGetter<Hash>) -> Self {
Self {
stream_getter,
stream: InnerStreamState::New,
}
}
/// Create a new [`FollowStream`] given the RPC methods.
pub fn from_methods<T: Config>(methods: ChainHeadRpcMethods<RpcConfigFor<T>>) -> FollowStream<HashFor<T>> {
FollowStream {
stream_getter: Box::new(move || {
let methods = methods.clone();
Box::pin(async move {
// Make the RPC call:
let stream = methods.chainhead_v1_follow(true).await?;
// Extract the subscription ID:
let Some(sub_id) = stream.subscription_id().map(ToOwned::to_owned) else {
return Err(BackendError::Other(
"Subscription ID expected for chainHead_follow response, but not given"
.to_owned(),
));
};
// Map stream errors into the higher level subxt one:
let stream = stream.map_err(|e| e.into());
let stream: FollowEventStream<HashFor<T>> = Box::pin(stream);
// Return both:
Ok((stream, sub_id))
})
}),
stream: InnerStreamState::New,
}
}
}
// `FollowStream` can be moved freely even when pinned: its inner future and
// stream are separately heap-pinned (`Pin<Box<...>>` in the type aliases above).
impl<Hash> std::marker::Unpin for FollowStream<Hash> {}
impl<Hash> Stream for FollowStream<Hash> {
    type Item = Result<FollowStreamMsg<Hash>, BackendError>;
    // Drive the re-subscription state machine; loops internally so that state
    // transitions happen eagerly until we have an item or must wait.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();
        loop {
            match &mut this.stream {
                InnerStreamState::New => {
                    // Kick off fetching a new subscription.
                    let fut = (this.stream_getter)();
                    this.stream = InnerStreamState::Initializing(fut);
                    continue;
                }
                InnerStreamState::Initializing(fut) => {
                    match fut.poll_unpin(cx) {
                        Poll::Pending => {
                            return Poll::Pending;
                        }
                        Poll::Ready(Ok(sub_with_id)) => {
                            this.stream = InnerStreamState::Ready(Some(sub_with_id));
                            continue;
                        }
                        Poll::Ready(Err(e)) => {
                            // Re-start if a reconnecting backend was enabled.
                            if e.is_disconnected_will_reconnect() {
                                this.stream = InnerStreamState::Stopped;
                                continue;
                            }
                            // Finish forever if there's an error, passing it on.
                            this.stream = InnerStreamState::Finished;
                            return Poll::Ready(Some(Err(e)));
                        }
                    }
                }
                InnerStreamState::Ready(stream) => {
                    // We never set the Option to `None`; we just have an Option so
                    // that we can take ownership of the contents easily here.
                    let (sub, sub_id) = stream.take().expect("should always be Some");
                    this.stream = InnerStreamState::ReceivingEvents(sub);
                    return Poll::Ready(Some(Ok(FollowStreamMsg::Ready(sub_id))));
                }
                InnerStreamState::ReceivingEvents(stream) => {
                    match stream.poll_next_unpin(cx) {
                        Poll::Pending => {
                            return Poll::Pending;
                        }
                        Poll::Ready(None) => {
                            // No error happened but the stream ended; restart and
                            // pass on a Stop message anyway.
                            this.stream = InnerStreamState::Stopped;
                            continue;
                        }
                        Poll::Ready(Some(Ok(ev))) => {
                            if let FollowEvent::Stop = ev {
                                // A stop event means the stream has ended, so start
                                // over after passing on the stop message.
                                this.stream = InnerStreamState::Stopped;
                                continue;
                            }
                            return Poll::Ready(Some(Ok(FollowStreamMsg::Event(ev))));
                        }
                        Poll::Ready(Some(Err(e))) => {
                            // Re-start if a reconnecting backend was enabled.
                            if e.is_disconnected_will_reconnect() {
                                this.stream = InnerStreamState::Stopped;
                                continue;
                            }
                            // Finish forever if there's an error, passing it on.
                            this.stream = InnerStreamState::Finished;
                            return Poll::Ready(Some(Err(e)));
                        }
                    }
                }
                InnerStreamState::Stopped => {
                    // Emit the Stop event, then restart from New next time around.
                    this.stream = InnerStreamState::New;
                    return Poll::Ready(Some(Ok(FollowStreamMsg::Event(FollowEvent::Stop))));
                }
                InnerStreamState::Finished => {
                    return Poll::Ready(None);
                }
            }
        }
    }
}
#[cfg(test)]
pub(super) mod test_utils {
    use super::*;
    use crate::config::substrate::H256;
    use std::sync::Arc;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use subxt_rpcs::methods::chain_head::{BestBlockChanged, Finalized, Initialized, NewBlock};
    /// Given some events, returns a follow stream getter that we can use in
    /// place of the usual RPC method. Each call to the getter resumes from
    /// where the previous stream left off, mimicking a re-subscription.
    pub fn test_stream_getter<Hash, F, I>(events: F) -> FollowEventStreamGetter<Hash>
    where
        Hash: Send + 'static,
        F: Fn() -> I + Send + 'static,
        I: IntoIterator<Item = Result<FollowEvent<Hash>, BackendError>>,
    {
        // Shared cursor into the event list, advanced as events are consumed.
        let start_idx = Arc::new(AtomicUsize::new(0));
        Box::new(move || {
            // Start the events from where we left off last time.
            let start_idx = start_idx.clone();
            let this_idx = start_idx.load(Ordering::Relaxed);
            let events: Vec<_> = events().into_iter().skip(this_idx).collect();
            Box::pin(async move {
                // Increment start_idx for each event we see, so that if we get
                // the stream again, we get only the remaining events for it.
                let stream = futures::stream::iter(events).map(move |ev| {
                    start_idx.fetch_add(1, Ordering::Relaxed);
                    ev
                });
                let stream: FollowEventStream<Hash> = Box::pin(stream);
                // Subscription IDs encode the resume index, e.g. "sub_id_2".
                Ok((stream, format!("sub_id_{this_idx}")))
            })
        })
    }
    /// An initialized event
    pub fn ev_initialized(n: u64) -> FollowEvent<H256> {
        FollowEvent::Initialized(Initialized {
            finalized_block_hashes: vec![H256::from_low_u64_le(n)],
            finalized_block_runtime: None,
        })
    }
    /// A new block event
    pub fn ev_new_block(parent_n: u64, n: u64) -> FollowEvent<H256> {
        FollowEvent::NewBlock(NewBlock {
            parent_block_hash: H256::from_low_u64_le(parent_n),
            block_hash: H256::from_low_u64_le(n),
            new_runtime: None,
        })
    }
    /// A best block event
    pub fn ev_best_block(n: u64) -> FollowEvent<H256> {
        FollowEvent::BestBlockChanged(BestBlockChanged {
            best_block_hash: H256::from_low_u64_le(n),
        })
    }
    /// A finalized event
    pub fn ev_finalized(
        finalized_ns: impl IntoIterator<Item = u64>,
        pruned_ns: impl IntoIterator<Item = u64>,
    ) -> FollowEvent<H256> {
        FollowEvent::Finalized(Finalized {
            finalized_block_hashes: finalized_ns
                .into_iter()
                .map(H256::from_low_u64_le)
                .collect(),
            pruned_block_hashes: pruned_ns.into_iter().map(H256::from_low_u64_le).collect(),
        })
    }
}
#[cfg(test)]
pub mod test {
    use super::*;
    use test_utils::{ev_initialized, ev_new_block, test_stream_getter};
    #[tokio::test]
    async fn follow_stream_provides_messages_until_error() {
        // The events we'll get back on the stream.
        let stream_getter = test_stream_getter(|| {
            [
                Ok(ev_initialized(1)),
                // Stop should lead to a drop and resubscribe:
                Ok(FollowEvent::Stop),
                Ok(FollowEvent::Stop),
                Ok(ev_new_block(1, 2)),
                // Nothing should be emitted after an error:
                Err(BackendError::Other("ended".to_owned())),
                Ok(ev_new_block(2, 3)),
            ]
        });
        let s = FollowStream::new(stream_getter);
        let out: Vec<_> = s.filter_map(async |e| e.ok()).collect().await;
        // The expected response, given the above. Note that a fresh `Ready`
        // message (with a new sub ID) follows each `Stop`, since the stream
        // re-subscribes, and the error terminates the stream entirely.
        assert_eq!(
            out,
            vec![
                FollowStreamMsg::Ready("sub_id_0".to_owned()),
                FollowStreamMsg::Event(ev_initialized(1)),
                FollowStreamMsg::Event(FollowEvent::Stop),
                FollowStreamMsg::Ready("sub_id_2".to_owned()),
                FollowStreamMsg::Event(FollowEvent::Stop),
                FollowStreamMsg::Ready("sub_id_3".to_owned()),
                FollowStreamMsg::Event(ev_new_block(1, 2)),
            ]
        );
    }
}
@@ -0,0 +1,755 @@
// Copyright 2019-2025 Parity Technologies (UK) Ltd.
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
// see LICENSE for license details.
use super::follow_stream_unpin::{BlockRef, FollowStreamMsg, FollowStreamUnpin};
use crate::config::Hash;
use crate::error::{BackendError, RpcError};
use futures::stream::{Stream, StreamExt};
use std::collections::{HashMap, HashSet, VecDeque};
use std::ops::DerefMut;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{Context, Poll, Waker};
use subxt_rpcs::methods::chain_head::{FollowEvent, Initialized, RuntimeEvent};
/// A `Stream` which builds on `FollowStreamDriver`, and allows multiple subscribers to obtain events
/// from the single underlying subscription (each being provided an `Initialized` message and all new
/// blocks since then, as if they were each creating a unique `chainHead_follow` subscription). This
/// is the "top" layer of our follow stream subscriptions, and the one that's interacted with elsewhere.
#[derive(Debug)]
pub struct FollowStreamDriver<H: Hash> {
    // The underlying unpin-tracking stream which this driver polls.
    inner: FollowStreamUnpin<H>,
    // State shared with every handle/subscription created from this driver.
    shared: Shared<H>,
}
impl<H: Hash> FollowStreamDriver<H> {
/// Create a new [`FollowStreamDriver`]. This must be polled by some executor
/// in order for any progress to be made. Things can subscribe to events.
pub fn new(follow_unpin: FollowStreamUnpin<H>) -> Self {
Self {
inner: follow_unpin,
shared: Shared::default(),
}
}
/// Return a handle from which we can create new subscriptions to follow events.
pub fn handle(&self) -> FollowStreamDriverHandle<H> {
FollowStreamDriverHandle {
shared: self.shared.clone(),
}
}
}
impl<H: Hash> Stream for FollowStreamDriver<H> {
    type Item = Result<(), BackendError>;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let next = match self.inner.poll_next_unpin(cx) {
            Poll::Pending => return Poll::Pending,
            Poll::Ready(next) => next,
        };
        match next {
            None => {
                // The inner stream finished; tell the shared state so that
                // every subscription can end too.
                self.shared.done();
                Poll::Ready(None)
            }
            Some(Err(e)) => Poll::Ready(Some(Err(e))),
            Some(Ok(item)) => {
                // Broadcast the item to all current subscribers.
                self.shared.push_item(item);
                Poll::Ready(Some(Ok(())))
            }
        }
    }
}
/// A handle that can be used to create subscribers, but that doesn't
/// itself subscribe to events.
#[derive(Debug, Clone)]
pub struct FollowStreamDriverHandle<H: Hash> {
    // State shared with the driver; subscriptions are registered into it.
    shared: Shared<H>,
}
impl<H: Hash> FollowStreamDriverHandle<H> {
/// Subscribe to follow events.
pub fn subscribe(&self) -> FollowStreamDriverSubscription<H> {
self.shared.subscribe()
}
}
/// A subscription to events from the [`FollowStreamDriver`]. All subscriptions
/// begin first with a `Ready` event containing the current subscription ID, and
/// then with an `Initialized` event containing the latest finalized block and latest
/// runtime information, and then any new/best block events and so on received since
/// the latest finalized block.
#[derive(Debug)]
pub struct FollowStreamDriverSubscription<H: Hash> {
    // Unique ID identifying this subscriber in the shared state.
    id: usize,
    // Set once the shared state reports no more items; we then yield `None`.
    done: bool,
    // Shared state from which items pushed by the driver are pulled.
    shared: Shared<H>,
    // Items taken from the shared state, buffered locally to pop one at a time.
    local_items: VecDeque<FollowStreamMsg<BlockRef<H>>>,
}
impl<H: Hash> Stream for FollowStreamDriverSubscription<H> {
    type Item = FollowStreamMsg<BlockRef<H>>;
    // Pop locally buffered items first; otherwise pull a fresh batch from the
    // shared state (registering our waker if nothing is available yet).
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        if self.done {
            return Poll::Ready(None);
        }
        loop {
            if let Some(item) = self.local_items.pop_front() {
                return Poll::Ready(Some(item));
            }
            let items = self.shared.take_items_and_save_waker(self.id, cx.waker());
            // If no items left, mark locally as done (to avoid further locking)
            // and return None to signal done-ness.
            let Some(items) = items else {
                self.done = true;
                return Poll::Ready(None);
            };
            // No items? We've saved the waker so we'll be told when more come.
            // Else, save the items locally and loop around to pop from them.
            if items.is_empty() {
                return Poll::Pending;
            } else {
                self.local_items = items;
            }
        }
    }
}
impl<H: Hash> FollowStreamDriverSubscription<H> {
    /// Return the current subscription ID. If the subscription has stopped, then this will
    /// wait until a new subscription has started with a new ID.
    pub async fn subscription_id(self) -> Option<String> {
        // Discard messages until the first `Ready` one, and pull its ID out.
        self.filter_map(|msg| {
            let id = match msg {
                FollowStreamMsg::Ready(sub_id) => Some(sub_id),
                FollowStreamMsg::Event(_) => None,
            };
            std::future::ready(id)
        })
        .next()
        .await
    }

    /// Subscribe to the follow events, ignoring any other messages.
    pub fn events(self) -> impl Stream<Item = FollowEvent<BlockRef<H>>> + Send + Sync {
        self.filter_map(|msg| std::future::ready(msg.into_event()))
    }
}
impl<H: Hash> Clone for FollowStreamDriverSubscription<H> {
fn clone(&self) -> Self {
self.shared.subscribe()
}
}
impl<H: Hash> Drop for FollowStreamDriverSubscription<H> {
    fn drop(&mut self) {
        // Deregister from the shared state so the driver stops buffering
        // items for this subscriber.
        self.shared.remove_sub(self.id);
    }
}
/// Locked shared state. The driver stream will access this state to push
/// events to any subscribers, and subscribers will access it to pull the
/// events destined for themselves.
#[derive(Debug, Clone)]
struct Shared<H: Hash>(Arc<Mutex<SharedState<H>>>);
#[derive(Debug)]
struct SharedState<H: Hash> {
    // True once the driver stream has finished; no more items will be pushed.
    done: bool,
    // Next subscriber ID to hand out.
    next_id: usize,
    // Per-subscriber buffered items and wakers, keyed by subscriber ID.
    subscribers: HashMap<usize, SubscriberDetails<H>>,
    /// Keep a buffer of all events that should be handed to a new subscription.
    block_events_for_new_subscriptions: VecDeque<FollowEvent<BlockRef<H>>>,
    // Keep track of the subscription ID we send out on new subs.
    current_subscription_id: Option<String>,
    // Keep track of the init message we send out on new subs.
    current_init_message: Option<Initialized<BlockRef<H>>>,
    // Runtime events by block hash; we need to track these to know
    // whether the runtime has changed when we see a finalized block notification.
    seen_runtime_events: HashMap<H, RuntimeEvent>,
}
impl<H: Hash> Default for Shared<H> {
    fn default() -> Self {
        // Fresh state: not done, no subscribers yet, IDs start at 1.
        let state = SharedState {
            done: false,
            next_id: 1,
            subscribers: HashMap::new(),
            block_events_for_new_subscriptions: VecDeque::new(),
            current_subscription_id: None,
            current_init_message: None,
            seen_runtime_events: HashMap::new(),
        };
        Shared(Arc::new(Mutex::new(state)))
    }
}
impl<H: Hash> Shared<H> {
/// Set the shared state to "done"; no more items will be handed to it.
pub fn done(&self) {
    let mut state = self.0.lock().unwrap();
    state.done = true;
    // Wake every subscriber so it can observe that the backend was closed.
    for waker in state.subscribers.values_mut().filter_map(|s| s.waker.take()) {
        waker.wake();
    }
}
/// Cleanup a subscription.
pub fn remove_sub(&self, sub_id: usize) {
    self.0.lock().unwrap().subscribers.remove(&sub_id);
}
/// Take items for some subscription ID and save the waker.
///
/// Returns `None` once the stream has closed and nothing remains buffered,
/// signalling end-of-stream to the subscription.
pub fn take_items_and_save_waker(
    &self,
    sub_id: usize,
    waker: &Waker,
) -> Option<VecDeque<FollowStreamMsg<BlockRef<H>>>> {
    let mut guard = self.0.lock().unwrap();
    let closed = guard.done;
    // Unknown subscription ID: nothing to hand back.
    let details = guard.subscribers.get_mut(&sub_id)?;
    if closed && details.items.is_empty() {
        // Stream over and nothing buffered: signal end-of-stream.
        return None;
    }
    // Only save the waker if further items may still arrive.
    if !closed {
        details.waker = Some(waker.clone());
    }
    Some(std::mem::take(&mut details.items))
}
/// Push a new item out to subscribers.
pub fn push_item(&self, item: FollowStreamMsg<BlockRef<H>>) {
let mut shared = self.0.lock().unwrap();
let shared = shared.deref_mut();
// broadcast item to subscribers:
for details in shared.subscribers.values_mut() {
details.items.push_back(item.clone());
if let Some(waker) = details.waker.take() {
waker.wake();
}
}
// Keep our buffer of ready/block events up-to-date:
match item {
FollowStreamMsg::Ready(sub_id) => {
// Set new subscription ID when it comes in.
shared.current_subscription_id = Some(sub_id);
}
FollowStreamMsg::Event(FollowEvent::Initialized(ev)) => {
// New subscriptions will be given this init message:
shared.current_init_message = Some(ev.clone());
// Clear block cache (since a new finalized block hash is seen):
shared.block_events_for_new_subscriptions.clear();
}
FollowStreamMsg::Event(FollowEvent::Finalized(finalized_ev)) => {
// Update the init message that we'll hand out to new subscriptions. If the init message
// is `None` for some reason, we just ignore this step.
if let Some(init_message) = &mut shared.current_init_message {
// Find the latest runtime update that's been finalized.
let newest_runtime = finalized_ev
.finalized_block_hashes
.iter()
.rev()
.filter_map(|h| shared.seen_runtime_events.get(&h.hash()).cloned())
.next();
shared.seen_runtime_events.clear();
init_message
.finalized_block_hashes
.clone_from(&finalized_ev.finalized_block_hashes);
if let Some(runtime_ev) = newest_runtime {
init_message.finalized_block_runtime = Some(runtime_ev);
}
}
// The last finalized block will be reported as Initialized by our driver,
// therefore there is no need to report NewBlock and BestBlock events for it.
// If the Finalized event reported multiple finalized hashes, we only care about
// the state at the head of the chain, therefore it is correct to remove those as well.
// Idem for the pruned hashes; they will never be reported again and we remove
// them from the window of events.
let to_remove: HashSet<H> = finalized_ev
.finalized_block_hashes
.iter()
.chain(finalized_ev.pruned_block_hashes.iter())
.map(|h| h.hash())
.collect();
shared
.block_events_for_new_subscriptions
.retain(|ev| match ev {
FollowEvent::NewBlock(new_block_ev) => {
!to_remove.contains(&new_block_ev.block_hash.hash())
}
FollowEvent::BestBlockChanged(best_block_ev) => {
!to_remove.contains(&best_block_ev.best_block_hash.hash())
}
_ => true,
});
}
FollowStreamMsg::Event(FollowEvent::NewBlock(new_block_ev)) => {
// If a new runtime is seen, note it so that when a block is finalized, we
// can associate that with a runtime update having happened.
if let Some(runtime_event) = &new_block_ev.new_runtime {
shared
.seen_runtime_events
.insert(new_block_ev.block_hash.hash(), runtime_event.clone());
}
shared
.block_events_for_new_subscriptions
.push_back(FollowEvent::NewBlock(new_block_ev));
}
FollowStreamMsg::Event(ev @ FollowEvent::BestBlockChanged(_)) => {
shared.block_events_for_new_subscriptions.push_back(ev);
}
FollowStreamMsg::Event(FollowEvent::Stop) => {
// On a stop event, clear everything. Wait for resubscription and new ready/initialised events.
shared.block_events_for_new_subscriptions.clear();
shared.current_subscription_id = None;
shared.current_init_message = None;
}
_ => {
// We don't buffer any other events.
}
}
}
/// Create a new subscription.
pub fn subscribe(&self) -> FollowStreamDriverSubscription<H> {
let mut shared = self.0.lock().unwrap();
let id = shared.next_id;
shared.next_id += 1;
shared.subscribers.insert(
id,
SubscriberDetails {
items: VecDeque::new(),
waker: None,
},
);
// Any new subscription should start with a "Ready" message and then an "Initialized"
// message, and then any non-finalized block events since that. If these don't exist,
// it means the subscription is currently stopped, and we should expect new Ready/Init
// messages anyway once it restarts.
let mut local_items = VecDeque::new();
if let Some(sub_id) = &shared.current_subscription_id {
local_items.push_back(FollowStreamMsg::Ready(sub_id.clone()));
}
if let Some(init_msg) = &shared.current_init_message {
local_items.push_back(FollowStreamMsg::Event(FollowEvent::Initialized(
init_msg.clone(),
)));
}
for ev in &shared.block_events_for_new_subscriptions {
local_items.push_back(FollowStreamMsg::Event(ev.clone()));
}
drop(shared);
FollowStreamDriverSubscription {
id,
done: false,
shared: self.clone(),
local_items,
}
}
}
/// Details for a given subscriber: any items it's not yet claimed,
/// and a way to wake it up when there are more items for it.
#[derive(Debug)]
struct SubscriberDetails<H: Hash> {
    /// Items pushed by the driver and not yet pulled by this subscriber.
    items: VecDeque<FollowStreamMsg<BlockRef<H>>>,
    /// Waker saved on the last pending poll; taken and woken on new items.
    waker: Option<Waker>,
}
/// A stream that subscribes to finalized blocks
/// and indicates whether a block was missed if it was restarted.
#[derive(Debug)]
pub struct FollowStreamFinalizedHeads<H: Hash, F> {
    /// The underlying subscription of follow events.
    stream: FollowStreamDriverSubscription<H>,
    /// The current subscription ID; set each time a `Ready` message is seen.
    sub_id: Option<String>,
    /// The newest finalized block we've reported; used after a restart to
    /// detect whether any blocks were missed while disconnected.
    last_seen_block: Option<BlockRef<H>>,
    /// Maps each follow event to the block refs we should hand back for it.
    f: F,
    /// Set once the underlying stream has ended; we then report `None` forever.
    is_done: bool,
}
// No field is structurally pinned, so it's fine to be Unpin regardless of `F`.
impl<H: Hash, F> Unpin for FollowStreamFinalizedHeads<H, F> {}
impl<H, F> FollowStreamFinalizedHeads<H, F>
where
H: Hash,
F: Fn(FollowEvent<BlockRef<H>>) -> Vec<BlockRef<H>>,
{
pub fn new(stream: FollowStreamDriverSubscription<H>, f: F) -> Self {
Self {
stream,
sub_id: None,
last_seen_block: None,
f,
is_done: false,
}
}
}
impl<H, F> Stream for FollowStreamFinalizedHeads<H, F>
where
    H: Hash,
    F: Fn(FollowEvent<BlockRef<H>>) -> Vec<BlockRef<H>>,
{
    type Item = Result<(String, Vec<BlockRef<H>>), BackendError>;
    /// Pull events from the underlying subscription, tracking the subscription
    /// ID and the last finalized block seen. After a restart (a fresh
    /// `Initialized` event), emit only the blocks newer than the last one we
    /// reported, or error if the last reported block is no longer present
    /// (meaning at least one block was missed while disconnected).
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        if self.is_done {
            return Poll::Ready(None);
        }
        loop {
            let Some(ev) = futures::ready!(self.stream.poll_next_unpin(cx)) else {
                self.is_done = true;
                return Poll::Ready(None);
            };
            let block_refs = match ev {
                FollowStreamMsg::Ready(sub_id) => {
                    // Just remember the subscription ID; nothing to emit yet.
                    self.sub_id = Some(sub_id);
                    continue;
                }
                FollowStreamMsg::Event(FollowEvent::Finalized(finalized)) => {
                    self.last_seen_block = finalized.finalized_block_hashes.last().cloned();
                    (self.f)(FollowEvent::Finalized(finalized))
                }
                FollowStreamMsg::Event(FollowEvent::Initialized(mut init)) => {
                    let prev = self.last_seen_block.take();
                    self.last_seen_block = init.finalized_block_hashes.last().cloned();
                    if let Some(p) = prev {
                        // If the block we last reported isn't in the new init message,
                        // then blocks were finalized while we weren't listening.
                        let Some(pos) = init
                            .finalized_block_hashes
                            .iter()
                            .position(|b| b.hash() == p.hash())
                        else {
                            return Poll::Ready(Some(Err(RpcError::ClientError(
                                subxt_rpcs::Error::DisconnectedWillReconnect(
                                    "Missed at least one block when the connection was lost"
                                        .to_owned(),
                                ),
                            )
                            .into())));
                        };
                        // If we got older blocks than `prev`, we need to remove them
                        // because they should already have been sent at this point.
                        init.finalized_block_hashes.drain(0..=pos);
                    }
                    (self.f)(FollowEvent::Initialized(init))
                }
                FollowStreamMsg::Event(ev) => (self.f)(ev),
            };
            // `f` may return nothing for this event (eg after draining above);
            // in that case keep polling rather than emitting an empty item.
            if block_refs.is_empty() {
                continue;
            }
            let sub_id = self
                .sub_id
                .clone()
                .expect("Ready is always emitted before any other event");
            return Poll::Ready(Some(Ok((sub_id, block_refs))));
        }
    }
}
#[cfg(test)]
mod test_utils {
    use super::super::follow_stream_unpin::test_utils::test_unpin_stream_getter;
    use super::*;
    /// Return a `FollowStreamDriver` built from a canned sequence of events,
    /// for driving the tests below. `max_life` is forwarded to the underlying
    /// unpin stream as the maximum block life.
    pub fn test_follow_stream_driver_getter<H, F, I>(
        events: F,
        max_life: usize,
    ) -> FollowStreamDriver<H>
    where
        H: Hash + 'static,
        F: Fn() -> I + Send + 'static,
        I: IntoIterator<Item = Result<FollowEvent<H>, BackendError>>,
    {
        let (stream, _) = test_unpin_stream_getter(events, max_life);
        FollowStreamDriver::new(stream)
    }
}
#[cfg(test)]
mod test {
    use futures::TryStreamExt;
    use primitive_types::H256;
    use super::super::follow_stream::test_utils::{
        ev_best_block, ev_finalized, ev_initialized, ev_new_block,
    };
    use super::super::follow_stream_unpin::test_utils::{
        ev_best_block_ref, ev_finalized_ref, ev_initialized_ref, ev_new_block_ref,
    };
    use super::test_utils::test_follow_stream_driver_getter;
    use super::*;
    /// The driver must be `Send` so it can be driven from a spawned task.
    #[test]
    fn follow_stream_driver_is_sendable() {
        fn assert_send<T: Send + 'static>(_: T) {}
        let stream_getter = test_follow_stream_driver_getter(|| [Ok(ev_initialized(1))], 10);
        assert_send(stream_getter);
    }
    /// Every subscriber sees the same events, and all of them end cleanly
    /// when the underlying stream errors out.
    #[tokio::test]
    async fn subscribers_all_receive_events_and_finish_gracefully_on_error() {
        let mut driver = test_follow_stream_driver_getter(
            || {
                [
                    Ok(ev_initialized(0)),
                    Ok(ev_new_block(0, 1)),
                    Ok(ev_best_block(1)),
                    Ok(ev_finalized([1], [])),
                    Err(BackendError::Other("ended".to_owned())),
                ]
            },
            10,
        );
        let handle = driver.handle();
        let a = handle.subscribe();
        let b = handle.subscribe();
        let c = handle.subscribe();
        // Drive to completion (the sort of real life usage I'd expect):
        tokio::spawn(async move { while driver.next().await.is_some() {} });
        let a_vec: Vec<_> = a.collect().await;
        let b_vec: Vec<_> = b.collect().await;
        let c_vec: Vec<_> = c.collect().await;
        let expected = vec![
            FollowStreamMsg::Ready("sub_id_0".into()),
            FollowStreamMsg::Event(ev_initialized_ref(0)),
            FollowStreamMsg::Event(ev_new_block_ref(0, 1)),
            FollowStreamMsg::Event(ev_best_block_ref(1)),
            FollowStreamMsg::Event(ev_finalized_ref([1])),
        ];
        assert_eq!(a_vec, expected);
        assert_eq!(b_vec, expected);
        assert_eq!(c_vec, expected);
    }
    /// A late subscriber is caught up: it gets Ready + the current Initialized
    /// message plus any buffered non-finalized block events.
    #[tokio::test]
    async fn subscribers_receive_block_events_from_last_finalised() {
        let mut driver = test_follow_stream_driver_getter(
            || {
                [
                    Ok(ev_initialized(0)),
                    Ok(ev_new_block(0, 1)),
                    Ok(ev_best_block(1)),
                    Ok(ev_finalized([1], [])),
                    Ok(ev_new_block(1, 2)),
                    Ok(ev_new_block(2, 3)),
                    Err(BackendError::Other("ended".to_owned())),
                ]
            },
            10,
        );
        // Skip past ready, init, new, best events.
        let _r = driver.next().await.unwrap();
        let _i0 = driver.next().await.unwrap();
        let _n1 = driver.next().await.unwrap();
        let _b1 = driver.next().await.unwrap();
        // THEN subscribe; subscription should still receive them:
        let evs: Vec<_> = driver.handle().subscribe().take(4).collect().await;
        let expected = vec![
            FollowStreamMsg::Ready("sub_id_0".into()),
            FollowStreamMsg::Event(ev_initialized_ref(0)),
            FollowStreamMsg::Event(ev_new_block_ref(0, 1)),
            FollowStreamMsg::Event(ev_best_block_ref(1)),
        ];
        assert_eq!(evs, expected);
        // Skip past finalized 1, new 2, new 3 events
        let _f1 = driver.next().await.unwrap();
        let _n2 = driver.next().await.unwrap();
        let _n3 = driver.next().await.unwrap();
        // THEN subscribe again; new subs will see an updated initialized message
        // with the latest finalized block hash.
        let evs: Vec<_> = driver.handle().subscribe().take(4).collect().await;
        let expected = vec![
            FollowStreamMsg::Ready("sub_id_0".into()),
            FollowStreamMsg::Event(ev_initialized_ref(1)),
            FollowStreamMsg::Event(ev_new_block_ref(1, 2)),
            FollowStreamMsg::Event(ev_new_block_ref(2, 3)),
        ];
        assert_eq!(evs, expected);
    }
    /// Block events seen before subscribing (and not yet superseded by a
    /// finalization) are replayed to the new subscriber.
    #[tokio::test]
    async fn subscribers_receive_new_blocks_before_subscribing() {
        let mut driver = test_follow_stream_driver_getter(
            || {
                [
                    Ok(ev_initialized(0)),
                    Ok(ev_new_block(0, 1)),
                    Ok(ev_best_block(1)),
                    Ok(ev_new_block(1, 2)),
                    Ok(ev_new_block(2, 3)),
                    Ok(ev_finalized([1], [])),
                    Err(BackendError::Other("ended".to_owned())),
                ]
            },
            10,
        );
        // Skip to the first finalized block F1.
        let _r = driver.next().await.unwrap();
        let _i0 = driver.next().await.unwrap();
        let _n1 = driver.next().await.unwrap();
        let _b1 = driver.next().await.unwrap();
        let _n2 = driver.next().await.unwrap();
        let _n3 = driver.next().await.unwrap();
        let _f1 = driver.next().await.unwrap();
        // THEN subscribe; and make sure new block 1 and 2 are received.
        let evs: Vec<_> = driver.handle().subscribe().take(4).collect().await;
        let expected = vec![
            FollowStreamMsg::Ready("sub_id_0".into()),
            FollowStreamMsg::Event(ev_initialized_ref(1)),
            FollowStreamMsg::Event(ev_new_block_ref(1, 2)),
            FollowStreamMsg::Event(ev_new_block_ref(2, 3)),
        ];
        assert_eq!(evs, expected);
    }
    /// After a Stop + resubscribe where nothing was missed, the finalized-heads
    /// stream resumes cleanly with the new subscription ID.
    #[tokio::test]
    async fn subscribe_finalized_blocks_restart_works() {
        let mut driver = test_follow_stream_driver_getter(
            || {
                [
                    Ok(ev_initialized(0)),
                    Ok(ev_new_block(0, 1)),
                    Ok(ev_best_block(1)),
                    Ok(ev_finalized([1], [])),
                    Ok(FollowEvent::Stop),
                    Ok(ev_initialized(1)),
                    Ok(ev_finalized([2], [])),
                    Err(BackendError::Other("ended".to_owned())),
                ]
            },
            10,
        );
        let handle = driver.handle();
        tokio::spawn(async move { while driver.next().await.is_some() {} });
        let f = |ev| match ev {
            FollowEvent::Finalized(ev) => ev.finalized_block_hashes,
            FollowEvent::Initialized(ev) => ev.finalized_block_hashes,
            _ => vec![],
        };
        let stream = FollowStreamFinalizedHeads::new(handle.subscribe(), f);
        let evs: Vec<_> = stream.try_collect().await.unwrap();
        let expected = vec![
            (
                "sub_id_0".to_string(),
                vec![BlockRef::new(H256::from_low_u64_le(0))],
            ),
            (
                "sub_id_0".to_string(),
                vec![BlockRef::new(H256::from_low_u64_le(1))],
            ),
            (
                "sub_id_5".to_string(),
                vec![BlockRef::new(H256::from_low_u64_le(2))],
            ),
        ];
        assert_eq!(evs, expected);
    }
    /// After a Stop + resubscribe where blocks WERE missed, the stream
    /// surfaces a DisconnectedWillReconnect error, then continues.
    #[tokio::test]
    async fn subscribe_finalized_blocks_restart_with_missed_blocks() {
        let mut driver = test_follow_stream_driver_getter(
            || {
                [
                    Ok(ev_initialized(0)),
                    Ok(FollowEvent::Stop),
                    // Emulate that we missed some blocks.
                    Ok(ev_initialized(13)),
                    Ok(ev_finalized([14], [])),
                    Err(BackendError::Other("ended".to_owned())),
                ]
            },
            10,
        );
        let handle = driver.handle();
        tokio::spawn(async move { while driver.next().await.is_some() {} });
        let f = |ev| match ev {
            FollowEvent::Finalized(ev) => ev.finalized_block_hashes,
            FollowEvent::Initialized(ev) => ev.finalized_block_hashes,
            _ => vec![],
        };
        let evs: Vec<_> = FollowStreamFinalizedHeads::new(handle.subscribe(), f)
            .collect()
            .await;
        assert_eq!(
            evs[0].as_ref().unwrap(),
            &(
                "sub_id_0".to_string(),
                vec![BlockRef::new(H256::from_low_u64_le(0))]
            )
        );
        assert!(
            matches!(&evs[1], Err(BackendError::Rpc(RpcError::ClientError(subxt_rpcs::Error::DisconnectedWillReconnect(e)))) if e.contains("Missed at least one block when the connection was lost"))
        );
        assert_eq!(
            evs[2].as_ref().unwrap(),
            &(
                "sub_id_2".to_string(),
                vec![BlockRef::new(H256::from_low_u64_le(14))]
            )
        );
    }
}
@@ -0,0 +1,813 @@
// Copyright 2019-2025 Parity Technologies (UK) Ltd.
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
// see LICENSE for license details.
use super::ChainHeadRpcMethods;
use super::follow_stream::FollowStream;
use crate::config::{Config, Hash, HashFor, RpcConfigFor};
use crate::error::BackendError;
use futures::stream::{FuturesUnordered, Stream, StreamExt};
use subxt_rpcs::methods::chain_head::{
BestBlockChanged, Finalized, FollowEvent, Initialized, NewBlock,
};
use std::collections::{HashMap, HashSet};
use std::future::Future;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{Context, Poll, Waker};
/// The type of stream item.
pub use super::follow_stream::FollowStreamMsg;
/// A `Stream` which builds on `FollowStream`, and handles pinning. It replaces any block hash seen in
/// the follow events with a `BlockRef` which, when all clones are dropped, will lead to an "unpin" call
/// for that block hash being queued. It will also automatically unpin any blocks that exceed a given max
/// age, to try and prevent the underlying stream from ending (and _all_ blocks from being unpinned as a
/// result). Put simply, it tries to keep every block pinned as long as possible until the block is no longer
/// used anywhere.
#[derive(Debug)]
pub struct FollowStreamUnpin<H: Hash> {
    /// The underlying stream of events.
    inner: FollowStream<H>,
    /// A method to call to unpin a block, given a block hash and a subscription ID.
    unpin_method: UnpinMethodHolder<H>,
    /// Futures for sending unpin events that we'll poll to completion as
    /// part of polling the stream as a whole.
    unpin_futs: FuturesUnordered<UnpinFut>,
    /// Each time a new finalized block is seen, we give it an age of `next_rel_block_age`,
    /// and then increment this ready for the next finalized block. So, the first finalized
    /// block will have an age of 0, the next 1, 2, 3 and so on. We can then use `max_block_life`
    /// to say "unpin all blocks with an age < (next_rel_block_age-1) - max_block_life".
    next_rel_block_age: usize,
    /// The latest ID of the FollowStream subscription, which we can use
    /// to unpin blocks.
    subscription_id: Option<Arc<str>>,
    /// The longest period a block can be pinned for.
    max_block_life: usize,
    /// The currently seen and pinned blocks.
    pinned: HashMap<H, PinnedDetails<H>>,
    /// Shared state about blocks we've flagged to unpin from elsewhere
    /// (`BlockRef`s insert into this when dropped).
    unpin_flags: UnpinFlags<H>,
}
// Just a wrapper to make implementing debug on the whole thing easier,
// since the boxed closure itself cannot derive Debug.
struct UnpinMethodHolder<H>(UnpinMethod<H>);
impl<H> std::fmt::Debug for UnpinMethodHolder<H> {
    // Print a fixed placeholder; the closure has no useful Debug output.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "UnpinMethodHolder(Box<dyn FnMut(Hash, Arc<str>) -> UnpinFut>)"
        )
    }
}
/// The type of the unpin method that we need to provide.
pub type UnpinMethod<H> = Box<dyn FnMut(H, Arc<str>) -> UnpinFut + Send>;
/// The future returned from [`UnpinMethod`].
pub type UnpinFut = Pin<Box<dyn Future<Output = ()> + Send + 'static>>;
// No field is structurally pinned, so the stream is safely Unpin.
impl<H: Hash> std::marker::Unpin for FollowStreamUnpin<H> {}
impl<H: Hash> Stream for FollowStreamUnpin<H> {
    type Item = Result<FollowStreamMsg<BlockRef<H>>, BackendError>;
    /// Drive any queued unpin futures, then pull the next event from the inner
    /// stream, pinning/aging blocks as they appear and replacing raw hashes
    /// with [`BlockRef`]s in the event handed back.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let mut this = self.as_mut();
        loop {
            // Poll any queued unpin tasks.
            let unpin_futs_are_pending = match this.unpin_futs.poll_next_unpin(cx) {
                Poll::Ready(Some(())) => continue,
                Poll::Ready(None) => false,
                Poll::Pending => true,
            };
            // Poll the inner stream for the next event.
            let Poll::Ready(ev) = this.inner.poll_next_unpin(cx) else {
                return Poll::Pending;
            };
            let Some(ev) = ev else {
                // if the stream is done, but `unpin_futs` are still pending, then
                // return pending here so that they are still driven to completion.
                // Else, return `Ready(None)` to signal nothing left to do.
                return match unpin_futs_are_pending {
                    true => Poll::Pending,
                    false => Poll::Ready(None),
                };
            };
            // Error? just return it and do nothing further.
            let ev = match ev {
                Ok(ev) => ev,
                Err(e) => {
                    return Poll::Ready(Some(Err(e)));
                }
            };
            // React to any actual FollowEvent we get back.
            let ev = match ev {
                FollowStreamMsg::Ready(subscription_id) => {
                    // update the subscription ID we'll use to unpin things.
                    this.subscription_id = Some(subscription_id.clone().into());
                    FollowStreamMsg::Ready(subscription_id)
                }
                FollowStreamMsg::Event(FollowEvent::Initialized(details)) => {
                    let mut finalized_block_hashes =
                        Vec::with_capacity(details.finalized_block_hashes.len());
                    // Pin each of the finalized blocks. None of them will show up again (except as a
                    // parent block), and so they can all be unpinned immediately at any time. Increment
                    // the block age for each one, so that older finalized blocks are pruned first.
                    for finalized_block in &details.finalized_block_hashes {
                        let rel_block_age = this.next_rel_block_age;
                        let block_ref =
                            this.pin_unpinnable_block_at(rel_block_age, *finalized_block);
                        finalized_block_hashes.push(block_ref);
                        this.next_rel_block_age += 1;
                    }
                    FollowStreamMsg::Event(FollowEvent::Initialized(Initialized {
                        finalized_block_hashes,
                        finalized_block_runtime: details.finalized_block_runtime,
                    }))
                }
                FollowStreamMsg::Event(FollowEvent::NewBlock(details)) => {
                    // One bigger than our parent, and if no parent seen (maybe it was
                    // unpinned already), then one bigger than the last finalized block num
                    // as a best guess.
                    let parent_rel_block_age = this
                        .pinned
                        .get(&details.parent_block_hash)
                        .map(|p| p.rel_block_age)
                        .unwrap_or(this.next_rel_block_age.saturating_sub(1));
                    let block_ref = this.pin_block_at(parent_rel_block_age + 1, details.block_hash);
                    let parent_block_ref =
                        this.pin_block_at(parent_rel_block_age, details.parent_block_hash);
                    FollowStreamMsg::Event(FollowEvent::NewBlock(NewBlock {
                        block_hash: block_ref,
                        parent_block_hash: parent_block_ref,
                        new_runtime: details.new_runtime,
                    }))
                }
                FollowStreamMsg::Event(FollowEvent::BestBlockChanged(details)) => {
                    // We expect this block to already exist, so it'll keep its existing block_num,
                    // but worst case it'll just get the current finalized block_num + 1.
                    let rel_block_age = this.next_rel_block_age;
                    let block_ref = this.pin_block_at(rel_block_age, details.best_block_hash);
                    FollowStreamMsg::Event(FollowEvent::BestBlockChanged(BestBlockChanged {
                        best_block_hash: block_ref,
                    }))
                }
                FollowStreamMsg::Event(FollowEvent::Finalized(details)) => {
                    let finalized_block_refs: Vec<_> = details
                        .finalized_block_hashes
                        .into_iter()
                        .enumerate()
                        .map(|(idx, hash)| {
                            // These blocks _should_ exist already and so will have a known block num,
                            // but if they don't, we just increment the num from the last finalized block
                            // we saw, which should be accurate.
                            //
                            // `pin_unpinnable_block_at` indicates that the block will not show up in future events
                            // (They will show up as a parent block, but we don't care about that right now).
                            let rel_block_age = this.next_rel_block_age + idx;
                            this.pin_unpinnable_block_at(rel_block_age, hash)
                        })
                        .collect();
                    // Our relative block height is increased by however many finalized
                    // blocks we've seen.
                    this.next_rel_block_age += finalized_block_refs.len();
                    let pruned_block_refs: Vec<_> = details
                        .pruned_block_hashes
                        .into_iter()
                        .map(|hash| {
                            // We should know about these, too, and if not we set their age to last_finalized + 1.
                            //
                            // `pin_unpinnable_block_at` indicates that the block will not show up in future events.
                            let rel_block_age = this.next_rel_block_age;
                            this.pin_unpinnable_block_at(rel_block_age, hash)
                        })
                        .collect();
                    // At this point, we also check to see which blocks we should submit unpin events
                    // for. We will unpin:
                    // - Any block that's older than the max age.
                    // - Any block that has no references left (ie has been dropped) that _also_ has
                    //   showed up in the pruned list in a finalized event (so it will never be in another event).
                    this.unpin_blocks(cx.waker());
                    FollowStreamMsg::Event(FollowEvent::Finalized(Finalized {
                        finalized_block_hashes: finalized_block_refs,
                        pruned_block_hashes: pruned_block_refs,
                    }))
                }
                FollowStreamMsg::Event(FollowEvent::Stop) => {
                    // clear out "old" things that are no longer applicable since
                    // the subscription has ended (a new one will be created under the hood,
                    // at which point we'll get given a new subscription ID).
                    this.subscription_id = None;
                    this.pinned.clear();
                    this.unpin_futs.clear();
                    this.unpin_flags.lock().unwrap().clear();
                    this.next_rel_block_age = 0;
                    FollowStreamMsg::Event(FollowEvent::Stop)
                }
                // These events aren't interesting; we just forward them on. The arms look
                // redundant, but each re-wrap is needed to convert the event from
                // `FollowEvent<H>` into `FollowEvent<BlockRef<H>>`.
                FollowStreamMsg::Event(FollowEvent::OperationBodyDone(details)) => {
                    FollowStreamMsg::Event(FollowEvent::OperationBodyDone(details))
                }
                FollowStreamMsg::Event(FollowEvent::OperationCallDone(details)) => {
                    FollowStreamMsg::Event(FollowEvent::OperationCallDone(details))
                }
                FollowStreamMsg::Event(FollowEvent::OperationStorageItems(details)) => {
                    FollowStreamMsg::Event(FollowEvent::OperationStorageItems(details))
                }
                FollowStreamMsg::Event(FollowEvent::OperationWaitingForContinue(details)) => {
                    FollowStreamMsg::Event(FollowEvent::OperationWaitingForContinue(details))
                }
                FollowStreamMsg::Event(FollowEvent::OperationStorageDone(details)) => {
                    FollowStreamMsg::Event(FollowEvent::OperationStorageDone(details))
                }
                FollowStreamMsg::Event(FollowEvent::OperationInaccessible(details)) => {
                    FollowStreamMsg::Event(FollowEvent::OperationInaccessible(details))
                }
                FollowStreamMsg::Event(FollowEvent::OperationError(details)) => {
                    FollowStreamMsg::Event(FollowEvent::OperationError(details))
                }
            };
            // Return our event.
            return Poll::Ready(Some(Ok(ev)));
        }
    }
}
impl<H: Hash> FollowStreamUnpin<H> {
    /// Create a new [`FollowStreamUnpin`].
    ///
    /// `unpin_method` is called (with the block hash and current subscription ID)
    /// whenever a block should be unpinned; `max_block_life` is the maximum number
    /// of finalized blocks a pinned block may outlive before being force-unpinned.
    pub fn new(
        follow_stream: FollowStream<H>,
        unpin_method: UnpinMethod<H>,
        max_block_life: usize,
    ) -> Self {
        Self {
            inner: follow_stream,
            unpin_method: UnpinMethodHolder(unpin_method),
            max_block_life,
            pinned: Default::default(),
            subscription_id: None,
            next_rel_block_age: 0,
            unpin_flags: Default::default(),
            unpin_futs: Default::default(),
        }
    }
    /// Create a new [`FollowStreamUnpin`] given the RPC methods.
    ///
    /// The unpin method calls `chainhead_v1_unpin`, ignoring any errors.
    pub fn from_methods<T: Config>(
        follow_stream: FollowStream<HashFor<T>>,
        methods: ChainHeadRpcMethods<RpcConfigFor<T>>,
        max_block_life: usize,
    ) -> FollowStreamUnpin<HashFor<T>> {
        let unpin_method = Box::new(move |hash: HashFor<T>, sub_id: Arc<str>| {
            let methods = methods.clone();
            let fut: UnpinFut = Box::pin(async move {
                // We ignore any errors trying to unpin at the moment.
                let _ = methods.chainhead_v1_unpin(&sub_id, hash).await;
            });
            fut
        });
        FollowStreamUnpin::new(follow_stream, unpin_method, max_block_life)
    }
    /// Is the block hash currently pinned.
    pub fn is_pinned(&self, hash: &H) -> bool {
        self.pinned.contains_key(hash)
    }
    /// Pin a block, or return the reference to an already-pinned block. If the block has been registered to
    /// be unpinned, we'll clear those flags, so that it won't be unpinned. If the unpin request has already
    /// been sent though, then the block will be unpinned.
    fn pin_block_at(&mut self, rel_block_age: usize, hash: H) -> BlockRef<H> {
        self.pin_block_at_setting_unpinnable_flag(rel_block_age, hash, false)
    }
    /// Pin a block, or return the reference to an already-pinned block.
    ///
    /// This is the same as [`Self::pin_block_at`], except that it also marks the block as being unpinnable now,
    /// which should be done for any block that will no longer be seen in future events.
    fn pin_unpinnable_block_at(&mut self, rel_block_age: usize, hash: H) -> BlockRef<H> {
        self.pin_block_at_setting_unpinnable_flag(rel_block_age, hash, true)
    }
    // Shared implementation of the two pin methods above.
    fn pin_block_at_setting_unpinnable_flag(
        &mut self,
        rel_block_age: usize,
        hash: H,
        can_be_unpinned: bool,
    ) -> BlockRef<H> {
        let entry = self
            .pinned
            .entry(hash)
            // If there's already an entry, then clear any unpin_flags and update the
            // can_be_unpinned status (this can become true but cannot become false again
            // once true).
            .and_modify(|entry| {
                entry.can_be_unpinned = entry.can_be_unpinned || can_be_unpinned;
                self.unpin_flags.lock().unwrap().remove(&hash);
            })
            // If there's not an entry already, make one and return it.
            .or_insert_with(|| PinnedDetails {
                rel_block_age,
                block_ref: BlockRef {
                    inner: Arc::new(BlockRefInner {
                        hash,
                        unpin_flags: self.unpin_flags.clone(),
                    }),
                },
                can_be_unpinned,
            });
        entry.block_ref.clone()
    }
    /// Unpin any blocks that are either too old, or have the unpin flag set and are old enough.
    ///
    /// Called when a Finalized event is seen; queues unpin futures and wakes the
    /// task so they start being polled promptly.
    fn unpin_blocks(&mut self, waker: &Waker) {
        let mut unpin_flags = self.unpin_flags.lock().unwrap();
        // This gets the age of the last finalized block.
        let rel_block_age = self.next_rel_block_age.saturating_sub(1);
        // If we asked to unpin and there was no subscription_id, then there's nothing we can do,
        // and nothing will need unpinning now anyway.
        let Some(sub_id) = &self.subscription_id else {
            return;
        };
        let mut blocks_to_unpin = vec![];
        for (hash, details) in &self.pinned {
            if rel_block_age.saturating_sub(details.rel_block_age) >= self.max_block_life
                || (unpin_flags.contains(hash) && details.can_be_unpinned)
            {
                // The block is too old, or it's been flagged to be unpinned and won't be in a future
                // backend event, so we can unpin it for real now.
                blocks_to_unpin.push(*hash);
                // Clear it from our unpin flags if present so that we don't try to unpin it again.
                unpin_flags.remove(hash);
            }
        }
        // Release our lock on unpin_flags ASAP.
        drop(unpin_flags);
        // No need to call the waker etc if nothing to do:
        if blocks_to_unpin.is_empty() {
            return;
        }
        for hash in blocks_to_unpin {
            self.pinned.remove(&hash);
            let fut = (self.unpin_method.0)(hash, sub_id.clone());
            self.unpin_futs.push(fut);
        }
        // Any new futures pushed above need polling to start. We could
        // just wait for the next stream event, but let's wake the task to
        // have it polled sooner, just in case it's slow to receive things.
        waker.wake_by_ref();
    }
}
// The set of block hashes that can be unpinned when ready.
// BlockRefs write to this when they are dropped.
type UnpinFlags<H> = Arc<Mutex<HashSet<H>>>;
/// Everything we track for a single pinned block.
#[derive(Debug)]
struct PinnedDetails<H: Hash> {
    /// Relatively speaking, how old is the block? When we start following
    /// blocks, the first finalized block gets an age of 0, the second an age
    /// of 1 and so on.
    rel_block_age: usize,
    /// A block ref we can hand out to keep blocks pinned.
    /// Because we store one here until it's unpinned, the live count
    /// will only drop to 1 when no external refs are left.
    block_ref: BlockRef<H>,
    /// Has this block showed up in the list of pruned blocks, or has it
    /// been finalized? In this case, it can now be unpinned as it won't
    /// show up again in future events (except as a "parent block" of some
    /// new block, which we're currently ignoring).
    can_be_unpinned: bool,
}
/// All blocks reported will be wrapped in this. Clones share the same inner
/// state; when the last external clone is dropped, the block is flagged for
/// unpinning (see the `Drop` impl).
#[derive(Debug, Clone)]
pub struct BlockRef<H: Hash> {
    inner: Arc<BlockRefInner<H>>,
}
/// The shared innards of a [`BlockRef`]: the block hash plus a handle to the
/// unpin flags that the `Drop` impl writes to.
#[derive(Debug)]
struct BlockRefInner<H> {
    hash: H,
    unpin_flags: UnpinFlags<H>,
}
impl<H: Hash> BlockRef<H> {
    /// For testing purposes only, create a BlockRef from a hash
    /// that isn't pinned (its unpin flags are a fresh, unshared set,
    /// so dropping it has no effect on any real stream).
    #[cfg(test)]
    pub fn new(hash: H) -> Self {
        BlockRef {
            inner: Arc::new(BlockRefInner {
                hash,
                unpin_flags: Default::default(),
            }),
        }
    }
    /// Return the hash for this block.
    pub fn hash(&self) -> H {
        self.inner.hash
    }
}
impl<H: Hash> PartialEq for BlockRef<H> {
    /// Two `BlockRef`s are equal when they point at the same block hash,
    /// regardless of which pinned state they share.
    fn eq(&self, other: &Self) -> bool {
        self.hash() == other.hash()
    }
}
impl<H: Hash> PartialEq<H> for BlockRef<H> {
    /// A `BlockRef` can be compared directly against a raw block hash.
    fn eq(&self, other: &H) -> bool {
        self.hash() == *other
    }
}
impl<H: Hash> Drop for BlockRef<H> {
    /// Flag the block for unpinning once the last *external* clone is dropped.
    fn drop(&mut self) {
        // PinnedDetails keeps one ref, so if this is the second ref, it's the
        // only "external" one left and we should ask to unpin it now. if it's
        // the only ref remaining, it means that it's already been unpinned, so
        // nothing to do here anyway.
        if Arc::strong_count(&self.inner) == 2 {
            // If the lock is poisoned we just skip flagging; the block will
            // still be unpinned eventually when it exceeds max_block_life.
            if let Ok(mut unpin_flags) = self.inner.unpin_flags.lock() {
                unpin_flags.insert(self.inner.hash);
            }
        }
    }
}
#[cfg(test)]
pub(super) mod test_utils {
use super::super::follow_stream::{FollowStream, test_utils::test_stream_getter};
use super::*;
use crate::config::substrate::H256;
pub type UnpinRx<H> = std::sync::mpsc::Receiver<(H, Arc<str>)>;
/// Get a [`FollowStreamUnpin`] from an iterator over events.
pub fn test_unpin_stream_getter<H, F, I>(
events: F,
max_life: usize,
) -> (FollowStreamUnpin<H>, UnpinRx<H>)
where
H: Hash + 'static,
F: Fn() -> I + Send + 'static,
I: IntoIterator<Item = Result<FollowEvent<H>, BackendError>>,
{
// Unpin requests will come here so that we can look out for them.
let (unpin_tx, unpin_rx) = std::sync::mpsc::channel();
let follow_stream = FollowStream::new(test_stream_getter(events));
let unpin_method: UnpinMethod<H> = Box::new(move |hash, sub_id| {
unpin_tx.send((hash, sub_id)).unwrap();
Box::pin(std::future::ready(()))
});
let follow_unpin = FollowStreamUnpin::new(follow_stream, unpin_method, max_life);
(follow_unpin, unpin_rx)
}
/// Assert that the unpinned blocks sent from the `UnpinRx` channel match the items given.
pub fn assert_from_unpin_rx<H: Hash + 'static>(
unpin_rx: &UnpinRx<H>,
items: impl IntoIterator<Item = H>,
) {
let expected_hashes = HashSet::<H>::from_iter(items);
for i in 0..expected_hashes.len() {
let Ok((hash, _)) = unpin_rx.try_recv() else {
panic!("Another unpin event is expected, but failed to pull item {i} from channel");
};
assert!(
expected_hashes.contains(&hash),
"Hash {hash:?} was unpinned, but is not expected to have been"
);
}
}
/// An initialized event containing a BlockRef (useful for comparisons)
pub fn ev_initialized_ref(n: u64) -> FollowEvent<BlockRef<H256>> {
FollowEvent::Initialized(Initialized {
finalized_block_hashes: vec![BlockRef::new(H256::from_low_u64_le(n))],
finalized_block_runtime: None,
})
}
/// A new block event containing a BlockRef (useful for comparisons)
pub fn ev_new_block_ref(parent: u64, n: u64) -> FollowEvent<BlockRef<H256>> {
FollowEvent::NewBlock(NewBlock {
parent_block_hash: BlockRef::new(H256::from_low_u64_le(parent)),
block_hash: BlockRef::new(H256::from_low_u64_le(n)),
new_runtime: None,
})
}
/// A best block event containing a BlockRef (useful for comparisons)
pub fn ev_best_block_ref(n: u64) -> FollowEvent<BlockRef<H256>> {
FollowEvent::BestBlockChanged(BestBlockChanged {
best_block_hash: BlockRef::new(H256::from_low_u64_le(n)),
})
}
/// A finalized event containing a BlockRef (useful for comparisons)
pub fn ev_finalized_ref(ns: impl IntoIterator<Item = u64>) -> FollowEvent<BlockRef<H256>> {
    // Build the list of finalized block refs up front rather than inline.
    let mut finalized_block_hashes = Vec::new();
    for n in ns {
        finalized_block_hashes.push(BlockRef::new(H256::from_low_u64_le(n)));
    }
    FollowEvent::Finalized(Finalized {
        finalized_block_hashes,
        pruned_block_hashes: vec![],
    })
}
}
#[cfg(test)]
mod test {
    use super::super::follow_stream::test_utils::{
        ev_best_block, ev_finalized, ev_initialized, ev_new_block,
    };
    use super::test_utils::{assert_from_unpin_rx, ev_new_block_ref, test_unpin_stream_getter};
    use super::*;
    use crate::config::substrate::H256;

    // Events should flow through the unpin stream untouched, except that plain
    // hashes become pinned `BlockRef`s.
    #[tokio::test]
    async fn hands_back_blocks() {
        let (follow_unpin, _) = test_unpin_stream_getter(
            || {
                [
                    Ok(ev_new_block(0, 1)),
                    Ok(ev_new_block(1, 2)),
                    Ok(ev_new_block(2, 3)),
                    Err(BackendError::Other("ended".to_owned())),
                ]
            },
            10,
        );
        // Collect only the Ok messages; the trailing error ends the stream.
        let out: Vec<_> = follow_unpin.filter_map(async |e| e.ok()).collect().await;
        assert_eq!(
            out,
            vec![
                FollowStreamMsg::Ready("sub_id_0".into()),
                FollowStreamMsg::Event(ev_new_block_ref(0, 1)),
                FollowStreamMsg::Event(ev_new_block_ref(1, 2)),
                FollowStreamMsg::Event(ev_new_block_ref(2, 3)),
            ]
        );
    }

    // Dropping the initialized block's ref should lead to it being unpinned
    // once a finalization event has gone by.
    #[tokio::test]
    async fn unpins_initialized_block() {
        let (mut follow_unpin, unpin_rx) = test_unpin_stream_getter(
            || {
                [
                    Ok(ev_initialized(0)),
                    Ok(ev_finalized([1], [])),
                    Err(BackendError::Other("ended".to_owned())),
                ]
            },
            3,
        );
        let _r = follow_unpin.next().await.unwrap().unwrap();
        // Drop the initialized block:
        let i0 = follow_unpin.next().await.unwrap().unwrap();
        drop(i0);
        // Let a finalization event occur.
        let _f1 = follow_unpin.next().await.unwrap().unwrap();
        // Now, initialized block should be unpinned.
        assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(0)]);
        assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(0)));
    }

    // With max_life = 3, blocks should be unpinned once they fall 3 finalized
    // blocks behind, even though their refs are never dropped here.
    #[tokio::test]
    async fn unpins_old_blocks() {
        let (mut follow_unpin, unpin_rx) = test_unpin_stream_getter(
            || {
                [
                    Ok(ev_initialized(0)),
                    Ok(ev_finalized([1], [])),
                    Ok(ev_finalized([2], [])),
                    Ok(ev_finalized([3], [])),
                    Ok(ev_finalized([4], [])),
                    Ok(ev_finalized([5], [])),
                    Err(BackendError::Other("ended".to_owned())),
                ]
            },
            3,
        );
        let _r = follow_unpin.next().await.unwrap().unwrap();
        let _i0 = follow_unpin.next().await.unwrap().unwrap();
        unpin_rx.try_recv().expect_err("nothing unpinned yet");
        let _f1 = follow_unpin.next().await.unwrap().unwrap();
        unpin_rx.try_recv().expect_err("nothing unpinned yet");
        let _f2 = follow_unpin.next().await.unwrap().unwrap();
        unpin_rx.try_recv().expect_err("nothing unpinned yet");
        let _f3 = follow_unpin.next().await.unwrap().unwrap();
        // Max age is 3, so after block 3 finalized, block 0 becomes too old and is unpinned.
        assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(0)]);
        let _f4 = follow_unpin.next().await.unwrap().unwrap();
        // Block 1 is now too old and is unpinned.
        assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(1)]);
        let _f5 = follow_unpin.next().await.unwrap().unwrap();
        // Block 2 is now too old and is unpinned.
        assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(2)]);
    }

    #[tokio::test]
    async fn dropped_new_blocks_should_not_get_unpinned_until_finalization() {
        let (mut follow_unpin, unpin_rx) = test_unpin_stream_getter(
            || {
                [
                    Ok(ev_initialized(0)),
                    Ok(ev_new_block(0, 1)),
                    Ok(ev_new_block(1, 2)),
                    Ok(ev_finalized([1], [])),
                    Ok(ev_finalized([2], [])),
                    Err(BackendError::Other("ended".to_owned())),
                ]
            },
            10,
        );
        let _r = follow_unpin.next().await.unwrap().unwrap();
        let _i0 = follow_unpin.next().await.unwrap().unwrap();
        let n1 = follow_unpin.next().await.unwrap().unwrap();
        drop(n1);
        let n2 = follow_unpin.next().await.unwrap().unwrap();
        drop(n2);
        // New blocks dropped but still pinned:
        assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(1)));
        assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(2)));
        let f1 = follow_unpin.next().await.unwrap().unwrap();
        drop(f1);
        // After block 1 finalized, both blocks are still pinned because:
        // - block 1 was handed back in the finalized event, so will be unpinned next time.
        // - block 2 wasn't mentioned in the finalized event, so should not have been unpinned yet.
        assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(1)));
        assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(2)));
        let f2 = follow_unpin.next().await.unwrap().unwrap();
        drop(f2);
        // After block 2 finalized, block 1 can be unpinned finally, but block 2 needs to wait one more event.
        assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(1)));
        assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(2)));
        assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(1)]);
    }

    #[tokio::test]
    async fn dropped_new_blocks_should_not_get_unpinned_until_pruned() {
        let (mut follow_unpin, unpin_rx) = test_unpin_stream_getter(
            || {
                [
                    Ok(ev_initialized(0)),
                    Ok(ev_new_block(0, 1)),
                    Ok(ev_new_block(1, 2)),
                    Ok(ev_new_block(1, 3)),
                    Ok(ev_finalized([1], [])),
                    // Block 3 is pruned here (it forked off block 1):
                    Ok(ev_finalized([2], [3])),
                    Ok(ev_finalized([4], [])),
                    Err(BackendError::Other("ended".to_owned())),
                ]
            },
            10,
        );
        let _r = follow_unpin.next().await.unwrap().unwrap();
        let _i0 = follow_unpin.next().await.unwrap().unwrap();
        let n1 = follow_unpin.next().await.unwrap().unwrap();
        drop(n1);
        let n2 = follow_unpin.next().await.unwrap().unwrap();
        drop(n2);
        let n3 = follow_unpin.next().await.unwrap().unwrap();
        drop(n3);
        let f1 = follow_unpin.next().await.unwrap().unwrap();
        drop(f1);
        // After block 1 is finalized, everything is still pinned because the finalization event
        // itself returns 1, and 2/3 aren't finalized or pruned yet.
        assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(1)));
        assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(2)));
        assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(3)));
        let f2 = follow_unpin.next().await.unwrap().unwrap();
        drop(f2);
        // After the next finalization event, block 1 can finally be unpinned since it was Finalized
        // last event _and_ is no longer handed back anywhere. 2 and 3 should still be pinned.
        assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(1)));
        assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(2)));
        assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(3)));
        assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(1)]);
        let f4 = follow_unpin.next().await.unwrap().unwrap();
        drop(f4);
        // After some other finalized event, we are now allowed to ditch the previously pruned and
        // finalized blocks 2 and 3.
        assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(2)));
        assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(3)));
        assert_from_unpin_rx(
            &unpin_rx,
            [H256::from_low_u64_le(2), H256::from_low_u64_le(3)],
        );
    }

    #[tokio::test]
    async fn never_unpin_new_block_before_finalized() {
        // Ensure that if we drop a new block; the pinning is still active until the block is finalized.
        let (mut follow_unpin, unpin_rx) = test_unpin_stream_getter(
            || {
                [
                    Ok(ev_initialized(0)),
                    Ok(ev_new_block(0, 1)),
                    Ok(ev_new_block(1, 2)),
                    Ok(ev_best_block(1)),
                    Ok(ev_finalized([1], [])),
                    Ok(ev_finalized([2], [])),
                    Err(BackendError::Other("ended".to_owned())),
                ]
            },
            10,
        );
        let _r = follow_unpin.next().await.unwrap().unwrap();
        // drop initialised block 0 and new block 1 and new block 2.
        let i0 = follow_unpin.next().await.unwrap().unwrap();
        drop(i0);
        let n1 = follow_unpin.next().await.unwrap().unwrap();
        drop(n1);
        let n2 = follow_unpin.next().await.unwrap().unwrap();
        drop(n2);
        let b1 = follow_unpin.next().await.unwrap().unwrap();
        drop(b1);
        // Nothing unpinned yet!
        unpin_rx.try_recv().expect_err("nothing unpinned yet");
        let f1 = follow_unpin.next().await.unwrap().unwrap();
        drop(f1);
        // After finalization, block 1 is now ready to be unpinned (it won't be seen again),
        // but isn't actually unpinned yet (because it was just handed back in f1). Block 0
        // however has now been unpinned.
        assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(0)));
        assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(0)]);
        unpin_rx.try_recv().expect_err("nothing unpinned yet");
        let f2 = follow_unpin.next().await.unwrap().unwrap();
        drop(f2);
        // After f2, we can get rid of block 1 now, which was finalized last time.
        assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(1)));
        assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(1)]);
        unpin_rx.try_recv().expect_err("nothing unpinned yet");
    }
}
+169
View File
@@ -0,0 +1,169 @@
// Copyright 2019-2025 Parity Technologies (UK) Ltd.
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
// see LICENSE for license details.
use super::follow_stream_driver::FollowStreamDriverHandle;
use super::follow_stream_unpin::BlockRef;
use crate::config::{Config, HashFor, RpcConfigFor};
use crate::error::{BackendError, RpcError};
use futures::{FutureExt, Stream, StreamExt};
use std::collections::VecDeque;
use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use subxt_rpcs::methods::chain_head::{
ChainHeadRpcMethods, FollowEvent, MethodResponse, StorageQuery, StorageResult,
};
/// Obtain a stream of storage items given some query. This handles continuing
/// and stopping under the hood, and returns a stream of `StorageResult`s.
pub struct StorageItems<T: Config> {
    // Set once the operation completes, errors or is told there's nothing more;
    // the stream then yields `None` forever.
    done: bool,
    // The operation ID handed back by `chainhead_v1_storage`; used to filter
    // follow events down to those belonging to this operation.
    operation_id: Arc<str>,
    // Items received from the node but not yet handed to the caller.
    buffered_responses: VecDeque<StorageResult>,
    // Makes a future which calls `chainhead_v1_continue` for this operation.
    continue_call: ContinueFutGetter,
    // The in-flight "continue" call, if one is currently being driven.
    continue_fut: Option<ContinueFut>,
    // The stream of follow events we watch for operation updates.
    follow_event_stream: FollowEventStream<HashFor<T>>,
}
impl<T: Config> StorageItems<T> {
    /// Subscribe to follow events, and return a stream of storage results
    /// given some storage queries. The stream will automatically resume as
    /// needed, and stop when done.
    pub async fn from_methods(
        queries: impl Iterator<Item = StorageQuery<&[u8]>>,
        at: HashFor<T>,
        follow_handle: &FollowStreamDriverHandle<HashFor<T>>,
        methods: ChainHeadRpcMethods<RpcConfigFor<T>>,
    ) -> Result<Self, BackendError> {
        let sub_id = super::get_subscription_id(follow_handle).await?;

        // Subscribe to events and make the initial request to get an operation ID.
        // Note: subscribing *before* starting the storage call means no events
        // relating to the operation can be missed.
        let follow_events = follow_handle.subscribe().events();
        let status = methods
            .chainhead_v1_storage(&sub_id, at, queries, None)
            .await?;
        let operation_id: Arc<str> = match status {
            // The node refused to start the operation; surface as an RPC error.
            MethodResponse::LimitReached => return Err(RpcError::LimitReached.into()),
            MethodResponse::Started(s) => s.operation_id.into(),
        };

        // A function which returns the call to continue the subscription:
        let continue_call: ContinueFutGetter = {
            let operation_id = operation_id.clone();
            Box::new(move || {
                let sub_id = sub_id.clone();
                let operation_id = operation_id.clone();
                let methods = methods.clone();
                Box::pin(async move {
                    methods
                        .chainhead_v1_continue(&sub_id, &operation_id)
                        .await?;
                    Ok(())
                })
            })
        };

        Ok(StorageItems::new(
            operation_id,
            continue_call,
            Box::pin(follow_events),
        ))
    }

    // Construct a `StorageItems` from its raw parts.
    fn new(
        operation_id: Arc<str>,
        continue_call: ContinueFutGetter,
        follow_event_stream: FollowEventStream<HashFor<T>>,
    ) -> Self {
        Self {
            done: false,
            buffered_responses: VecDeque::new(),
            operation_id,
            continue_call,
            continue_fut: None,
            follow_event_stream,
        }
    }
}
/// A pinned, boxed stream of follow events.
pub type FollowEventStream<Hash> =
    Pin<Box<dyn Stream<Item = FollowEvent<BlockRef<Hash>>> + Send + 'static>>;
/// Constructs a fresh future which, when polled, asks the node to continue the
/// storage operation.
pub type ContinueFutGetter = Box<dyn Fn() -> ContinueFut + Send + 'static>;
/// A future resolving once the node has been asked to continue the operation.
pub type ContinueFut = Pin<Box<dyn Future<Output = Result<(), BackendError>> + Send + 'static>>;
impl<T: Config> Stream for StorageItems<T> {
    type Item = Result<StorageResult, BackendError>;
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        loop {
            // Once done (completed or errored), stay done.
            if self.done {
                return Poll::Ready(None);
            }

            // Hand back any previously-buffered items, one at a time.
            if let Some(item) = self.buffered_responses.pop_front() {
                return Poll::Ready(Some(Ok(item)));
            }

            // Drive any in-flight "continue" call to completion before looking
            // at more follow events.
            if let Some(mut fut) = self.continue_fut.take() {
                match fut.poll_unpin(cx) {
                    Poll::Pending => {
                        self.continue_fut = Some(fut);
                        return Poll::Pending;
                    }
                    Poll::Ready(Err(e)) => {
                        if e.is_disconnected_will_reconnect() {
                            // Transient disconnect: issue the continue call again.
                            self.continue_fut = Some((self.continue_call)());
                            continue;
                        }
                        self.done = true;
                        return Poll::Ready(Some(Err(e)));
                    }
                    Poll::Ready(Ok(())) => {
                        // Finished; carry on.
                    }
                }
            }

            let ev = match self.follow_event_stream.poll_next_unpin(cx) {
                Poll::Pending => return Poll::Pending,
                Poll::Ready(None) => return Poll::Ready(None),
                Poll::Ready(Some(ev)) => ev,
            };

            match ev {
                FollowEvent::OperationWaitingForContinue(id)
                    if id.operation_id == *self.operation_id =>
                {
                    // Start a call to ask for more events
                    self.continue_fut = Some((self.continue_call)());
                    continue;
                }
                FollowEvent::OperationStorageDone(id) if id.operation_id == *self.operation_id => {
                    // We're finished!
                    self.done = true;
                    return Poll::Ready(None);
                }
                FollowEvent::OperationStorageItems(items)
                    if items.operation_id == *self.operation_id =>
                {
                    // We have items; buffer them to emit next loops.
                    self.buffered_responses = items.items;
                    continue;
                }
                FollowEvent::OperationError(err) if err.operation_id == *self.operation_id => {
                    // Something went wrong obtaining storage items; mark as done and return the error.
                    self.done = true;
                    return Poll::Ready(Some(Err(BackendError::Other(err.error))));
                }
                _ => {
                    // We don't care about this event; wait for the next.
                    // NOTE(review): `FollowEvent::Stop` also lands here, so a stopped
                    // follow subscription only ends this stream once the underlying
                    // event stream itself ends — confirm this is intended.
                    continue;
                }
            }
        }
    }
}
View File
+428
View File
@@ -0,0 +1,428 @@
// Copyright 2019-2025 Parity Technologies (UK) Ltd.
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
// see LICENSE for license details.
//! This module exposes a legacy backend implementation, which relies
//! on the legacy RPC API methods.
mod descendant_streams;
use self::rpc_methods::TransactionStatus as RpcTransactionStatus;
use crate::backend::utils::{retry, retry_stream};
use crate::backend::{
Backend, BlockRef, StorageResponse, StreamOf, StreamOfResults,
TransactionStatus,
};
use crate::config::{Config, HashFor, Hasher, Header, RpcConfigFor};
use crate::error::BackendError;
use async_trait::async_trait;
use futures::TryStreamExt;
use futures::{Future, Stream, StreamExt, future, future::Either, stream};
use subxt_rpcs::RpcClient;
use codec::Encode;
use descendant_streams::{StorageFetchDescendantKeysStream, StorageFetchDescendantValuesStream};
/// Re-export legacy RPC types and methods from [`subxt_rpcs::methods::legacy`].
pub mod rpc_methods {
    // Everything is re-exported verbatim; this module exists purely so callers
    // can reach the legacy RPC API through this backend module.
    pub use subxt_rpcs::methods::legacy::*;
}

// Expose the RPC methods.
pub use rpc_methods::LegacyRpcMethods;
/// Configure and build an [`LegacyBackend`].
pub struct LegacyBackendBuilder<T> {
    // Number of storage entries to request per pagination call (default 64).
    storage_page_size: u32,
    // Ties the builder to a chain `Config` without storing a value of it.
    _marker: std::marker::PhantomData<T>,
}
impl<T: Config> Default for LegacyBackendBuilder<T> {
fn default() -> Self {
Self::new()
}
}
impl<T: Config> LegacyBackendBuilder<T> {
/// Create a new [`LegacyBackendBuilder`].
pub fn new() -> Self {
Self {
storage_page_size: 64,
_marker: std::marker::PhantomData,
}
}
/// Iterating over storage entries using the [`LegacyBackend`] requires
/// fetching entries in batches. This configures the number of entries that
/// we'll try to obtain in each batch (default: 64).
pub fn storage_page_size(mut self, storage_page_size: u32) -> Self {
self.storage_page_size = storage_page_size;
self
}
/// Given an [`RpcClient`] to use to make requests, this returns a [`LegacyBackend`],
/// which implements the [`Backend`] trait.
pub fn build(self, client: impl Into<RpcClient>) -> LegacyBackend<T> {
LegacyBackend {
storage_page_size: self.storage_page_size,
methods: LegacyRpcMethods::new(client.into()),
}
}
}
/// The legacy backend.
#[derive(Debug)]
pub struct LegacyBackend<T> {
    // Number of storage entries fetched per pagination request.
    storage_page_size: u32,
    // The legacy RPC methods used to talk to the node.
    methods: LegacyRpcMethods<RpcConfigFor<T>>,
}
// Implemented by hand so that `LegacyBackend<T>` is cloneable even when `T`
// itself isn't `Clone` (a derive would add an unwanted `T: Clone` bound).
impl<T> Clone for LegacyBackend<T> {
    fn clone(&self) -> LegacyBackend<T> {
        LegacyBackend {
            methods: self.methods.clone(),
            storage_page_size: self.storage_page_size,
        }
    }
}
impl<T: Config> LegacyBackend<T> {
    /// Configure and construct an [`LegacyBackend`].
    pub fn builder() -> LegacyBackendBuilder<T> {
        // `Default` delegates to `LegacyBackendBuilder::new`.
        Default::default()
    }
}
// Seal the `Backend` implementation: only types in this crate can implement it.
impl<T: Config> super::sealed::Sealed for LegacyBackend<T> {}
#[async_trait]
impl<T: Config> Backend<T> for LegacyBackend<T> {
    async fn storage_fetch_values(
        &self,
        keys: Vec<Vec<u8>>,
        at: HashFor<T>,
    ) -> Result<StreamOfResults<StorageResponse>, BackendError> {
        // Fetch the value at one key, retrying over transient disconnects.
        fn get_entry<T: Config>(
            key: Vec<u8>,
            at: HashFor<T>,
            methods: LegacyRpcMethods<RpcConfigFor<T>>,
        ) -> impl Future<Output = Result<Option<StorageResponse>, BackendError>> {
            retry(move || {
                let methods = methods.clone();
                let key = key.clone();
                async move {
                    let res = methods.state_get_storage(&key, Some(at)).await?;
                    Ok(res.map(move |value| StorageResponse { key, value }))
                }
            })
        }

        let methods = self.methods.clone();

        // For each key, return it + a future to get the result.
        // (`keys` is already owned; no clone of the whole Vec is needed.)
        let iter = keys
            .into_iter()
            .map(move |key| get_entry(key, at, methods.clone()));

        let s = stream::iter(iter)
            // Resolve the future
            .then(|fut| fut)
            // Filter any Options out (ie if we didn't find a value at some key we return nothing for it).
            .filter_map(|r| future::ready(r.transpose()));

        Ok(StreamOf(Box::pin(s)))
    }

    async fn storage_fetch_descendant_keys(
        &self,
        key: Vec<u8>,
        at: HashFor<T>,
    ) -> Result<StreamOfResults<Vec<u8>>, BackendError> {
        // Paginated stream of key pages; flattened into individual keys below.
        let keys = StorageFetchDescendantKeysStream::new(
            self.methods.clone(),
            key,
            at,
            self.storage_page_size,
        );

        let keys = keys.flat_map(|keys| {
            match keys {
                Err(e) => {
                    // If there's an error, return that next:
                    Either::Left(stream::iter(std::iter::once(Err(e))))
                }
                Ok(keys) => {
                    // Or, stream each "ok" value:
                    Either::Right(stream::iter(keys.into_iter().map(Ok)))
                }
            }
        });

        Ok(StreamOf(Box::pin(keys)))
    }

    async fn storage_fetch_descendant_values(
        &self,
        key: Vec<u8>,
        at: HashFor<T>,
    ) -> Result<StreamOfResults<StorageResponse>, BackendError> {
        let values_stream = StorageFetchDescendantValuesStream::new(
            self.methods.clone(),
            key,
            at,
            self.storage_page_size,
        );
        Ok(StreamOf(Box::pin(values_stream)))
    }

    async fn genesis_hash(&self) -> Result<HashFor<T>, BackendError> {
        retry(|| async {
            let hash = self.methods.genesis_hash().await?;
            Ok(hash)
        })
        .await
    }

    async fn block_header(&self, at: HashFor<T>) -> Result<Option<T::Header>, BackendError> {
        retry(|| async {
            let header = self.methods.chain_get_header(Some(at)).await?;
            Ok(header)
        })
        .await
    }

    async fn block_body(&self, at: HashFor<T>) -> Result<Option<Vec<Vec<u8>>>, BackendError> {
        retry(|| async {
            let Some(details) = self.methods.chain_get_block(Some(at)).await? else {
                return Ok(None);
            };
            // Hand back the raw SCALE bytes of each extrinsic.
            Ok(Some(
                details.block.extrinsics.into_iter().map(|b| b.0).collect(),
            ))
        })
        .await
    }

    async fn latest_finalized_block_ref(&self) -> Result<BlockRef<HashFor<T>>, BackendError> {
        retry(|| async {
            let hash = self.methods.chain_get_finalized_head().await?;
            Ok(BlockRef::from_hash(hash))
        })
        .await
    }

    async fn stream_all_block_headers(
        &self,
        hasher: T::Hasher,
    ) -> Result<StreamOfResults<(T::Header, BlockRef<HashFor<T>>)>, BackendError> {
        let methods = self.methods.clone();

        let retry_sub = retry_stream(move || {
            let methods = methods.clone();
            let hasher = hasher.clone();
            Box::pin(async move {
                let sub = methods.chain_subscribe_all_heads().await?;
                // The legacy API doesn't hand back hashes, so compute them
                // by hashing the encoded header.
                let sub = sub.map_err(|e| e.into()).map(move |r| {
                    r.map(|h| {
                        let hash = hasher.hash(&h.encode());
                        (h, BlockRef::from_hash(hash))
                    })
                });
                Ok(StreamOf(Box::pin(sub)))
            })
        })
        .await?;
        Ok(retry_sub)
    }

    async fn stream_best_block_headers(
        &self,
        hasher: T::Hasher,
    ) -> Result<StreamOfResults<(T::Header, BlockRef<HashFor<T>>)>, BackendError> {
        let methods = self.methods.clone();

        let retry_sub = retry_stream(move || {
            let methods = methods.clone();
            let hasher = hasher.clone();
            Box::pin(async move {
                let sub = methods.chain_subscribe_new_heads().await?;
                let sub = sub.map_err(|e| e.into()).map(move |r| {
                    r.map(|h| {
                        let hash = hasher.hash(&h.encode());
                        (h, BlockRef::from_hash(hash))
                    })
                });
                Ok(StreamOf(Box::pin(sub)))
            })
        })
        .await?;
        Ok(retry_sub)
    }

    async fn stream_finalized_block_headers(
        &self,
        hasher: T::Hasher,
    ) -> Result<StreamOfResults<(T::Header, BlockRef<HashFor<T>>)>, BackendError> {
        let this = self.clone();

        let retry_sub = retry_stream(move || {
            let this = this.clone();
            let hasher = hasher.clone();
            Box::pin(async move {
                let sub = this.methods.chain_subscribe_finalized_heads().await?;

                // Get the last finalized block immediately so that the stream will emit every finalized block after this.
                let last_finalized_block_ref = this.latest_finalized_block_ref().await?;
                let last_finalized_block_num = this
                    .block_header(last_finalized_block_ref.hash())
                    .await?
                    .map(|h| h.number().into());

                // Fill in any missing blocks, because the backend may not emit every finalized block; just the latest ones which
                // are finalized each time.
                let sub = subscribe_to_block_headers_filling_in_gaps(
                    this.methods.clone(),
                    sub,
                    last_finalized_block_num,
                );

                let sub = sub.map(move |r| {
                    r.map(|h| {
                        let hash = hasher.hash(&h.encode());
                        (h, BlockRef::from_hash(hash))
                    })
                });
                Ok(StreamOf(Box::pin(sub)))
            })
        })
        .await?;
        Ok(retry_sub)
    }

    async fn submit_transaction(
        &self,
        extrinsic: &[u8],
    ) -> Result<StreamOfResults<TransactionStatus<HashFor<T>>>, BackendError> {
        // NOTE(review): deliberately not wrapped in `retry`, presumably because
        // resubmitting after a disconnect could double-submit — confirm.
        let sub = self
            .methods
            .author_submit_and_watch_extrinsic(extrinsic)
            .await?;

        // Map legacy `TransactionStatus`es to the backend-agnostic ones.
        let sub = sub.filter_map(|r| {
            let mapped = r
                .map_err(|e| e.into())
                .map(|tx| {
                    match tx {
                        // We ignore these because they don't map nicely to the new API. They don't signal "end states" so this should be fine.
                        RpcTransactionStatus::Future => None,
                        RpcTransactionStatus::Retracted(_) => None,
                        // These roughly map across:
                        RpcTransactionStatus::Ready => Some(TransactionStatus::Validated),
                        RpcTransactionStatus::Broadcast(_peers) => {
                            Some(TransactionStatus::Broadcasted)
                        }
                        RpcTransactionStatus::InBlock(hash) => {
                            Some(TransactionStatus::InBestBlock {
                                hash: BlockRef::from_hash(hash),
                            })
                        }
                        // These 5 mean that the stream will very likely end:
                        RpcTransactionStatus::FinalityTimeout(_) => {
                            Some(TransactionStatus::Dropped {
                                message: "Finality timeout".into(),
                            })
                        }
                        RpcTransactionStatus::Finalized(hash) => {
                            Some(TransactionStatus::InFinalizedBlock {
                                hash: BlockRef::from_hash(hash),
                            })
                        }
                        RpcTransactionStatus::Usurped(_) => Some(TransactionStatus::Invalid {
                            message: "Transaction was usurped by another with the same nonce"
                                .into(),
                        }),
                        RpcTransactionStatus::Dropped => Some(TransactionStatus::Dropped {
                            message: "Transaction was dropped".into(),
                        }),
                        RpcTransactionStatus::Invalid => Some(TransactionStatus::Invalid {
                            message:
                                "Transaction is invalid (eg because of a bad nonce, signature etc)"
                                    .into(),
                        }),
                    }
                })
                .transpose();
            future::ready(mapped)
        });

        Ok(StreamOf::new(Box::pin(sub)))
    }

    async fn call(
        &self,
        method: &str,
        call_parameters: Option<&[u8]>,
        at: HashFor<T>,
    ) -> Result<Vec<u8>, BackendError> {
        retry(|| async {
            let res = self
                .methods
                .state_call(method, call_parameters, Some(at))
                .await?;
            Ok(res)
        })
        .await
    }
}
/// Note: This is exposed for testing but is not considered stable and may change
/// without notice in a patch release.
///
/// Given a subscription of (possibly sparse) finalized headers, emit every
/// header from `last_block_num + 1` onwards, fetching any headers the
/// subscription skipped over.
#[doc(hidden)]
pub fn subscribe_to_block_headers_filling_in_gaps<T, S, E>(
    methods: LegacyRpcMethods<RpcConfigFor<T>>,
    sub: S,
    mut last_block_num: Option<u64>,
) -> impl Stream<Item = Result<T::Header, BackendError>> + Send
where
    T: Config,
    S: Stream<Item = Result<T::Header, E>> + Send,
    E: Into<BackendError> + Send + 'static,
{
    sub.flat_map(move |s| {
        // Get the header, or return a stream containing just the error.
        let header = match s {
            Ok(header) => header,
            Err(e) => return Either::Left(stream::once(async { Err(e.into()) })),
        };

        // We want all previous details up to, but not including this current block num.
        let end_block_num = header.number().into();

        // This is one after the last block we returned details for last time.
        // (If the incoming header is older than that, the range below is empty.)
        let start_block_num = last_block_num.map(|n| n + 1).unwrap_or(end_block_num);

        // Iterate over all of the previous blocks we need headers for, ignoring the current block
        // (which we already have the header info for):
        let methods = methods.clone();
        let previous_headers = stream::iter(start_block_num..end_block_num)
            .then(move |n| {
                let methods = methods.clone();
                async move {
                    let hash = methods.chain_get_block_hash(Some(n.into())).await?;
                    let header = methods.chain_get_header(hash).await?;
                    Ok::<_, BackendError>(header)
                }
            })
            // Blocks for which no header comes back are silently skipped.
            .filter_map(async |h| h.transpose());

        // On the next iteration, we'll get details starting just after this end block.
        last_block_num = Some(end_block_num);

        // Return a combination of any previous headers plus the new header.
        Either::Right(previous_headers.chain(stream::once(async { Ok(header) })))
    })
}
@@ -0,0 +1,256 @@
use crate::backend::utils::retry;
use crate::backend::StorageResponse;
use crate::config::{Config, HashFor, RpcConfigFor};
use crate::error::BackendError;
use futures::{Future, FutureExt, Stream, StreamExt};
use std::collections::VecDeque;
use std::pin::Pin;
use std::task::{Context, Poll};
use super::LegacyRpcMethods;
/// This provides a stream of values given some prefix `key`. It
/// internally manages pagination and such.
#[allow(clippy::type_complexity)]
pub struct StorageFetchDescendantKeysStream<T: Config> {
    // RPC methods used to issue the paged key queries.
    methods: LegacyRpcMethods<RpcConfigFor<T>>,
    // The prefix whose descendant keys we're iterating.
    key: Vec<u8>,
    // The block to read state at.
    at: HashFor<T>,
    // How many entries to ask for each time.
    storage_page_size: u32,
    // What key do we start paginating from? None = from the beginning.
    pagination_start_key: Option<Vec<u8>>,
    // In-flight future resolving to the next page of keys, if any.
    keys_fut:
        Option<Pin<Box<dyn Future<Output = Result<Vec<Vec<u8>>, BackendError>> + Send + 'static>>>,
    // Set to true when we're done:
    done: bool,
}
impl<T: Config> StorageFetchDescendantKeysStream<T> {
    /// Fetch descendant keys.
    pub fn new(
        methods: LegacyRpcMethods<RpcConfigFor<T>>,
        key: Vec<u8>,
        at: HashFor<T>,
        storage_page_size: u32,
    ) -> Self {
        Self {
            methods,
            key,
            at,
            storage_page_size,
            // Start from the beginning, with no request in flight.
            pagination_start_key: None,
            keys_fut: None,
            done: false,
        }
    }
}
// The only non-trivially-movable field is `keys_fut`, which is behind a
// `Pin<Box<_>>` (itself `Unpin`), so the stream as a whole is safely `Unpin`.
impl<T: Config> std::marker::Unpin for StorageFetchDescendantKeysStream<T> {}
impl<T: Config> Stream for StorageFetchDescendantKeysStream<T> {
    type Item = Result<Vec<Vec<u8>>, BackendError>;
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let mut this = self.as_mut();
        loop {
            // We're already done.
            if this.done {
                return Poll::Ready(None);
            }
            // Poll future to fetch next keys.
            if let Some(mut keys_fut) = this.keys_fut.take() {
                match keys_fut.poll_unpin(cx) {
                    Poll::Ready(Ok(mut keys)) => {
                        if this.pagination_start_key.is_some()
                            && keys.first() == this.pagination_start_key.as_ref()
                        {
                            // Currently, Smoldot returns the "start key" as the first key in the input
                            // (see https://github.com/smol-dot/smoldot/issues/1692), whereas Substrate doesn't.
                            // We don't expect the start key to be returned either (since it was the last key of prev
                            // iteration), so remove it if we see it. This `remove()` method isn't very efficient but
                            // this will be a non issue with the RPC V2 APIs or if Smoldot aligns with Substrate anyway.
                            keys.remove(0);
                        }
                        if keys.is_empty() {
                            // No keys left; we're done!
                            this.done = true;
                            return Poll::Ready(None);
                        }
                        // The last key is where we want to paginate from next time.
                        this.pagination_start_key = keys.last().cloned();
                        // return all of the keys from this run.
                        return Poll::Ready(Some(Ok(keys)));
                    }
                    Poll::Ready(Err(e)) => {
                        if e.is_disconnected_will_reconnect() {
                            // Loop around and try again. No more keys_fut as it was taken,
                            // so we'll ask for the keys again from the last good pagination_start_key.
                            continue;
                        }
                        // Error getting keys? Return it.
                        return Poll::Ready(Some(Err(e)));
                    },
                    Poll::Pending => {
                        this.keys_fut = Some(keys_fut);
                        return Poll::Pending;
                    }
                }
            }
            // Else, we don't have a fut to get keys yet so start one going.
            // Clone everything the future needs so it can be 'static.
            let methods = this.methods.clone();
            let key = this.key.clone();
            let at = this.at;
            let storage_page_size = this.storage_page_size;
            let pagination_start_key = this.pagination_start_key.clone();
            let keys_fut = async move {
                let keys = methods
                    .state_get_keys_paged(
                        &key,
                        storage_page_size,
                        pagination_start_key.as_deref(),
                        Some(at),
                    )
                    .await?;
                Ok(keys)
            };
            this.keys_fut = Some(Box::pin(keys_fut));
        }
    }
}
/// This provides a stream of values given some stream of keys.
#[allow(clippy::type_complexity)]
pub struct StorageFetchDescendantValuesStream<T: Config> {
    // Stream of keys.
    keys_stream: StorageFetchDescendantKeysStream<T>,
    // Keys back from the stream which we are currently trying to fetch results for:
    keys: Vec<Vec<u8>>,
    // A future which will resolve to the resulting values:
    results_fut: Option<
        Pin<
            Box<
                dyn Future<Output = Result<Option<VecDeque<(Vec<u8>, Vec<u8>)>>, BackendError>>
                    + Send
                    + 'static,
            >,
        >,
    >,
    // Once we get values back we put them here and hand them back one by one to the caller.
    results: VecDeque<(Vec<u8>, Vec<u8>)>,
}
impl <T: Config> StorageFetchDescendantValuesStream<T> {
/// Fetch descendant values.
pub fn new(
methods: LegacyRpcMethods<RpcConfigFor<T>>,
key: Vec<u8>,
at: HashFor<T>,
storage_page_size: u32,
) -> Self {
StorageFetchDescendantValuesStream {
keys_stream: StorageFetchDescendantKeysStream {
methods,
key,
at,
storage_page_size,
pagination_start_key: None,
keys_fut: None,
done: false,
},
keys: Default::default(),
results_fut: None,
results: Default::default(),
}
}
}
impl<T: Config> Stream for StorageFetchDescendantValuesStream<T> {
    type Item = Result<StorageResponse, BackendError>;
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let mut this = self.as_mut();
        loop {
            // If we have results back, return them one by one
            if let Some((key, value)) = this.results.pop_front() {
                let res = StorageResponse { key, value };
                return Poll::Ready(Some(Ok(res)));
            }
            // If we're waiting on the next results then poll that future:
            if let Some(mut results_fut) = this.results_fut.take() {
                match results_fut.poll_unpin(cx) {
                    Poll::Ready(Ok(Some(results))) => {
                        // Clear keys once result comes back.
                        this.keys = Vec::new();
                        this.results = results;
                        continue;
                    }
                    Poll::Ready(Ok(None)) => {
                        // Clear keys once result comes back.
                        this.keys = Vec::new();
                        // But no results came back for these keys, so we just skip them.
                        continue;
                    }
                    Poll::Ready(Err(e)) => {
                        if e.is_disconnected_will_reconnect() {
                            // Don't replace the `results_fut` since we got disconnected, and loop around.
                            // This will cause us to try re-fetching results for the current keys.
                            continue;
                        }
                        return Poll::Ready(Some(Err(e)))
                    }
                    Poll::Pending => {
                        this.results_fut = Some(results_fut);
                        return Poll::Pending;
                    }
                }
            }
            // If we have keys ready to fetch results for, then line up a results future to get them.
            // The keys stream handles disconnections internally for us.
            if !this.keys.is_empty() {
                let methods = this.keys_stream.methods.clone();
                let at = this.keys_stream.at;
                let keys = this.keys.clone();
                let results_fut = async move {
                    let keys = keys.iter().map(|k| &**k);
                    // Retry the query itself over transient disconnects.
                    let values = retry(|| async {
                        let res = methods
                            .state_query_storage_at(keys.clone(), Some(at))
                            .await?;
                        Ok(res)
                    })
                    .await?;
                    // Flatten the change-sets into (key, value) pairs, skipping
                    // any entries whose value is absent.
                    let values: VecDeque<_> = values
                        .into_iter()
                        .flat_map(|v| {
                            v.changes.into_iter().filter_map(|(k, v)| {
                                let v = v?;
                                Some((k.0, v.0))
                            })
                        })
                        .collect();
                    Ok(Some(values))
                };
                this.results_fut = Some(Box::pin(results_fut));
                continue;
            }
            // We have no keys yet so wait for those first.
            match this.keys_stream.poll_next_unpin(cx) {
                Poll::Ready(Some(Ok(keys))) => {
                    this.keys = keys;
                    continue;
                }
                Poll::Ready(Some(Err(e))) => return Poll::Ready(Some(Err(e))),
                Poll::Ready(None) => return Poll::Ready(None),
                Poll::Pending => return Poll::Pending,
            }
        }
    }
}
+273
View File
@@ -0,0 +1,273 @@
//! RPC utils.
use super::{StreamOf, StreamOfResults};
use crate::error::BackendError;
use futures::{FutureExt, Stream, StreamExt};
use std::{future::Future, pin::Pin, task::Poll};
/// Resubscribe callback: invoked to start a fresh subscription attempt.
// NOTE(review): these boxed aliases aren't referenced by the generic
// `RetrySubscription` below — confirm they're used elsewhere in this module
// (eg by `retry_stream`), or consider removing them.
type ResubscribeGetter<T> = Box<dyn FnMut() -> ResubscribeFuture<T> + Send>;

/// Future that resolves to a subscription stream.
type ResubscribeFuture<T> =
    Pin<Box<dyn Future<Output = Result<StreamOfResults<T>, BackendError>> + Send>>;
/// Retry subscription.
///
/// Wraps a subscription stream and, when a "disconnected but will reconnect"
/// error is observed, uses `resubscribe` to establish a fresh stream.
struct RetrySubscription<F, R, T> {
    // Called to start a new subscription attempt.
    resubscribe: F,
    // Where we currently are in the subscribe/stream lifecycle.
    state: RetrySubscriptionState<R, T>,
}
// The lifecycle of a retrying subscription.
enum RetrySubscriptionState<R, T> {
    // We need to call `resubscribe` to kick off a new attempt.
    Init,
    // Waiting for the resubscribe future to resolve to a stream.
    Pending(R),
    // Actively yielding items from the subscription stream.
    Stream(StreamOfResults<T>),
    // Finished; yield `None` from now on.
    Done,
}
// `Unpin` is a safe trait, and the `Stream` impl below only polls the inner
// future/stream via `Unpin`-bounded helpers, so this blanket impl is sound.
impl<F, R, T> std::marker::Unpin for RetrySubscription<F, R, T> {}
impl<F, R, T> Stream for RetrySubscription<F, R, T>
where
    F: FnMut() -> R,
    R: Future<Output = Result<StreamOfResults<T>, BackendError>> + Unpin,
{
    type Item = Result<T, BackendError>;
    fn poll_next(
        mut self: Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> Poll<Option<Self::Item>> {
        loop {
            match &mut self.state {
                RetrySubscriptionState::Init => {
                    // Kick off a (re)subscription attempt.
                    self.state = RetrySubscriptionState::Pending((self.resubscribe)());
                },
                RetrySubscriptionState::Stream(s) => match s.poll_next_unpin(cx) {
                    Poll::Ready(Some(Err(err))) => {
                        // Surface the error; if it signals that a reconnect will
                        // happen, arrange to resubscribe on the next poll.
                        if err.is_disconnected_will_reconnect() {
                            self.state = RetrySubscriptionState::Init;
                        }
                        return Poll::Ready(Some(Err(err)));
                    }
                    Poll::Ready(None) => {
                        // The inner stream is exhausted. Move to `Done` so that we
                        // never poll a completed stream again on subsequent calls.
                        self.state = RetrySubscriptionState::Done;
                        return Poll::Ready(None);
                    }
                    Poll::Ready(Some(Ok(val))) => {
                        return Poll::Ready(Some(Ok(val)));
                    }
                    Poll::Pending => {
                        return Poll::Pending;
                    }
                },
                RetrySubscriptionState::Pending(fut) => match fut.poll_unpin(cx) {
                    Poll::Ready(Err(err)) => {
                        if err.is_disconnected_will_reconnect() {
                            // Transient: try subscribing again on the next poll.
                            self.state = RetrySubscriptionState::Init;
                        } else {
                            // Fatal: the resubscribe future has completed, so move
                            // to `Done` rather than risk polling it again.
                            self.state = RetrySubscriptionState::Done;
                        }
                        return Poll::Ready(Some(Err(err)));
                    }
                    Poll::Ready(Ok(stream)) => {
                        self.state = RetrySubscriptionState::Stream(stream);
                        continue;
                    }
                    Poll::Pending => {
                        return Poll::Pending;
                    }
                },
                RetrySubscriptionState::Done => {
                    return Poll::Ready(None)
                }
            };
        }
    }
}
/// Retry a future until it doesn't return a disconnected error
/// (with a bounded number of extra retries for "limit reached" errors;
/// see the workaround note inside).
///
/// # Example
///
/// ```rust,no_run,standalone_crate
/// use subxt::backend::utils::retry;
///
/// async fn some_future() -> Result<(), subxt::error::BackendError> {
///     Ok(())
/// }
///
/// #[tokio::main]
/// async fn main() {
///     let result = retry(|| some_future()).await;
/// }
/// ```
pub async fn retry<T, F, R>(mut retry_future: F) -> Result<R, BackendError>
where
    F: FnMut() -> T,
    T: Future<Output = Result<R, BackendError>>,
{
    const REJECTED_MAX_RETRIES: usize = 10;
    let mut rejected_retries = 0;

    loop {
        // Success returns immediately; otherwise pull the error out.
        let e = match retry_future().await {
            Ok(v) => return Ok(v),
            Err(e) => e,
        };

        // Disconnections are always retried; the client will reconnect.
        if e.is_disconnected_will_reconnect() {
            continue;
        }

        // TODO: https://github.com/paritytech/subxt/issues/1567
        // This is a hack because, in the event of a disconnection,
        // we may not get the correct subscription ID back on reconnecting.
        //
        // This is because we have a race between this future and the
        // separate chainHead subscription, which runs in a different task.
        // if this future is too quick, it'll be given back an old
        // subscription ID from the chainHead subscription which has yet
        // to reconnect and establish a new subscription ID.
        //
        // In the event of a wrong subscription Id being used, we happen to
        // hand back an `RpcError::LimitReached`, and so can retry when we
        // specifically hit that error to see if we get a new subscription ID
        // eventually.
        if e.is_rpc_limit_reached() && rejected_retries < REJECTED_MAX_RETRIES {
            rejected_retries += 1;
            continue;
        }

        return Err(e);
    }
}
/// Create a retry stream that will resubscribe on disconnect.
///
/// It's important to note that this function is intended to work only for stateless subscriptions.
/// If the subscription takes input or modifies state, this function should not be used.
///
/// # Example
///
/// ```rust,no_run,standalone_crate
/// use subxt::backend::{utils::retry_stream, StreamOf};
/// use futures::future::FutureExt;
///
/// #[tokio::main]
/// async fn main() {
///     retry_stream(|| {
///         // This needs to return a stream of results but if you are using
///         // the subxt backend already it will return StreamOf so you can just
///         // return it directly in the async block below.
///         async move { Ok(StreamOf::new(Box::pin(futures::stream::iter([Ok(2)])))) }.boxed()
///     }).await;
/// }
/// ```
pub async fn retry_stream<F, Fut, R>(get_stream: F) -> Result<StreamOfResults<R>, BackendError>
where
    F: Clone + Send + 'static + FnMut() -> Fut,
    Fut: Future<Output = Result<StreamOfResults<R>, BackendError>> + Send,
    R: Send + 'static,
{
    // Each resubscription attempt clones the stream-getter and drives it
    // through `retry`, so disconnections while (re)subscribing are retried too.
    let resubscribe = move || {
        let stream_getter = get_stream.clone();
        async move { retry(stream_getter).await }.boxed()
    };

    let subscription = RetrySubscription {
        state: RetrySubscriptionState::Init,
        resubscribe,
    };

    // Box the subscription up so the concrete retry type stays private.
    Ok(StreamOf::new(Box::pin(subscription)))
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::backend::StreamOf;

    // An error meaning "connection dropped, but the client will auto-reconnect".
    fn disconnect_err() -> BackendError {
        BackendError::Rpc(subxt_rpcs::Error::DisconnectedWillReconnect(String::new()).into())
    }

    // An arbitrary error that is *not* retryable.
    fn custom_err() -> BackendError {
        BackendError::Other(String::new())
    }

    // On a disconnect error, `retry_stream` surfaces the error and then
    // transparently resubscribes, starting a fresh stream.
    #[tokio::test]
    async fn retry_stream_works() {
        let retry_stream = retry_stream(|| {
            async {
                Ok(StreamOf::new(Box::pin(futures::stream::iter([
                    Ok(1),
                    Ok(2),
                    Ok(3),
                    Err(disconnect_err()),
                ]))))
            }
            .boxed()
        })
        .await
        .unwrap();

        let result = retry_stream
            .take(5)
            .collect::<Vec<Result<usize, BackendError>>>()
            .await;

        // 1, 2, 3, then the disconnect error is surfaced, then the
        // resubscribed stream yields 1 again.
        assert!(matches!(result[0], Ok(r) if r == 1));
        assert!(matches!(result[1], Ok(r) if r == 2));
        assert!(matches!(result[2], Ok(r) if r == 3));
        assert!(matches!(result[3], Err(ref e) if e.is_disconnected_will_reconnect()));
        assert!(matches!(result[4], Ok(r) if r == 1));
    }

    // A disconnect from the inner stream triggers the resubscribe callback,
    // and items then come from the new stream.
    #[tokio::test]
    async fn retry_sub_works() {
        let stream = futures::stream::iter([Ok(1), Err(disconnect_err())]);
        let resubscribe = Box::new(move || {
            async move { Ok(StreamOf::new(Box::pin(futures::stream::iter([Ok(2)])))) }.boxed()
        });

        let retry_stream = RetrySubscription {
            state: RetrySubscriptionState::Stream(StreamOf::new(Box::pin(stream))),
            resubscribe,
        };

        let result: Vec<_> = retry_stream.collect().await;

        assert!(matches!(result[0], Ok(r) if r == 1));
        assert!(matches!(result[1], Err(ref e) if e.is_disconnected_will_reconnect()));
        assert!(matches!(result[2], Ok(r) if r == 2));
    }

    // If the inner stream ends cleanly (no disconnect), we do NOT
    // resubscribe: only the single item is yielded.
    #[tokio::test]
    async fn retry_sub_err_terminates_stream() {
        let stream = futures::stream::iter([Ok(1)]);
        let resubscribe = Box::new(|| async move { Err(custom_err()) }.boxed());

        let retry_stream = RetrySubscription {
            state: RetrySubscriptionState::Stream(StreamOf::new(Box::pin(stream))),
            resubscribe,
        };

        assert_eq!(retry_stream.count().await, 1);
    }

    // If resubscribing itself fails with a non-disconnect error, that error
    // is yielded to the consumer (and the stream should then terminate).
    #[tokio::test]
    async fn retry_sub_resubscribe_err() {
        let stream = futures::stream::iter([Ok(1), Err(disconnect_err())]);
        let resubscribe = Box::new(|| async move { Err(custom_err()) }.boxed());

        let retry_stream = RetrySubscription {
            state: RetrySubscriptionState::Stream(StreamOf::new(Box::pin(stream))),
            resubscribe,
        };

        let result: Vec<_> = retry_stream.collect().await;

        assert!(matches!(result[0], Ok(r) if r == 1));
        assert!(matches!(result[1], Err(ref e) if e.is_disconnected_will_reconnect()));
        assert!(matches!(result[2], Err(ref e) if matches!(e, BackendError::Other(_))));
    }
}
+28 -1
View File
@@ -1,2 +1,29 @@
mod offline_client;
mod online_client;
mod offline_client;
use core::marker::PhantomData;
// We keep these traits internal, so that we can mess with them later if needed,
// and instead only the concrete types are public which wrap these trait impls.
pub(crate) use offline_client::OfflineClientAtBlockT;
pub(crate) use online_client::OnlineClientAtBlockT;
pub use offline_client::OfflineClient;
pub use online_client::OnlineClient;
/// This represents a client at a specific block number.
///
/// Constructed via `OfflineClient::at` or `OnlineClient::at_block`; wraps the
/// per-block client state, carrying the chain config as a type parameter.
#[derive(Clone, Debug)]
pub struct ClientAtBlock<Client, T> {
    // The underlying at-block client implementation (offline or online).
    client: Client,
    // Ties the `T` config type parameter to this struct without storing a value.
    marker: PhantomData<T>,
}
impl<Client, T> ClientAtBlock<Client, T> {
/// Construct a new client at some block.
pub(crate) fn new(client: Client) -> Self {
Self {
client,
marker: PhantomData,
}
}
}
+58
View File
@@ -0,0 +1,58 @@
use crate::config::Config;
use crate::client::ClientAtBlock;
use crate::error::OfflineClientAtBlockError;
use subxt_metadata::Metadata;
use std::sync::Arc;
/// A client which resolves spec versions and metadata for historic blocks
/// entirely from its `Config`, without contacting a node.
#[derive(Clone, Debug)]
pub struct OfflineClient<T: Config> {
    /// The configuration for this client.
    config: T,
}
impl<T: Config> OfflineClient<T> {
    /// Create a new [`OfflineClient`] with the given configuration.
    pub fn new(config: T) -> Self {
        Self { config }
    }

    /// Pick the block height at which to operate. This references data from the
    /// [`OfflineClient`] it's called on, and so cannot outlive it.
    pub fn at(
        &self,
        block_number: u32,
    ) -> Result<ClientAtBlock<OfflineClientAtBlock, T>, OfflineClientAtBlockError> {
        // Map the block number to a runtime spec version. Offline, this must
        // come from the config; we have no node to ask.
        let Some(spec_version) = self.config.spec_version_for_block_number(block_number) else {
            return Err(OfflineClientAtBlockError::SpecVersionNotFound { block_number });
        };

        // Likewise, the metadata for that spec version must be available offline.
        let Some(metadata) = self.config.metadata_for_spec_version(spec_version) else {
            return Err(OfflineClientAtBlockError::MetadataNotFound { spec_version });
        };

        Ok(ClientAtBlock::new(OfflineClientAtBlock { metadata }))
    }
}
// The state needed to serve offline at-block queries: just the metadata
// resolved for the block's spec version.
pub struct OfflineClientAtBlock {
    metadata: Arc<Metadata>,
}

/// This represents an offline-only client at a specific block.
#[doc(hidden)]
pub trait OfflineClientAtBlockT {
    /// Get the metadata appropriate for this block.
    fn metadata(&self) -> &Metadata;
}

impl OfflineClientAtBlockT for OfflineClientAtBlock {
    fn metadata(&self) -> &Metadata {
        // Deref through the Arc to hand out a plain reference.
        &self.metadata
    }
}
+337
View File
@@ -0,0 +1,337 @@
use super::ClientAtBlock;
use super::OfflineClientAtBlockT;
use crate::config::{ Config, HashFor, RpcConfigFor };
use crate::error::OnlineClientAtBlockError;
use crate::backend::Backend;
use codec::{Compact, Decode, Encode};
use frame_metadata::{RuntimeMetadata, RuntimeMetadataPrefixed};
use scale_info_legacy::TypeRegistrySet;
use std::sync::Arc;
use subxt_rpcs::methods::chain_head::ArchiveCallResult;
use subxt_rpcs::{ChainHeadRpcMethods, RpcClient};
use subxt_metadata::Metadata;
#[cfg(feature = "jsonrpsee")]
#[cfg_attr(docsrs, doc(cfg(feature = "jsonrpsee")))]
use crate::error::OnlineClientError;
/// A client which exposes the means to decode historic data on a chain online.
///
/// Cheap to clone: all state is shared behind an `Arc`.
#[derive(Clone, Debug)]
pub struct OnlineClient<T: Config> {
    // Shared config + backend; cloned handles point at the same inner state.
    inner: Arc<OnlineClientInner<T>>,
}
// The shared innards of an `OnlineClient`.
struct OnlineClientInner<T: Config> {
    /// The configuration for this client.
    config: T,
    /// The RPC methods used to communicate with the node.
    // NOTE(review): the doc above predates the backend migration — this field
    // is now the backend trait object, yet `at_block` still reads
    // `self.inner.rpc_methods`, which does not exist here. WIP inconsistency
    // to resolve.
    backend: Arc<dyn Backend<T>>,
}
// Manual Debug impl: neither the config nor the backend trait object is
// required to implement Debug, so placeholder markers are printed instead.
impl<T: Config> std::fmt::Debug for OnlineClientInner<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut out = f.debug_struct("OnlineClientInner");
        out.field("config", &"<config>");
        out.field("backend", &"Arc<backend impl>");
        out.finish()
    }
}
impl<T: Config> OnlineClient<T> {
    /// Construct a new [`OnlineClient`] using default settings which
    /// point to a locally running node on `ws://127.0.0.1:9944`.
    ///
    /// **Note:** This will only work if the local node is an archive node.
    #[cfg(feature = "jsonrpsee")]
    pub async fn new(config: T) -> Result<OnlineClient<T>, OnlineClientError> {
        let url = "ws://127.0.0.1:9944";
        OnlineClient::from_url(config, url).await
    }

    /// Construct a new [`OnlineClient`], providing a URL to connect to.
    ///
    /// Errors with [`OnlineClientError::InvalidUrl`] if the URL can't be parsed,
    /// and rejects unencrypted schemes for anything that isn't localhost.
    #[cfg(feature = "jsonrpsee")]
    pub async fn from_url(
        config: T,
        url: impl AsRef<str>,
    ) -> Result<OnlineClient<T>, OnlineClientError> {
        let url_str = url.as_ref();
        let url = url::Url::parse(url_str).map_err(|_| OnlineClientError::InvalidUrl {
            url: url_str.to_string(),
        })?;
        // Only allow plain-text connections to localhost; everything else
        // must use an encrypted scheme (see `is_url_secure`).
        if !Self::is_url_secure(&url) {
            return Err(OnlineClientError::RpcError(
                subxt_rpcs::Error::InsecureUrl(url_str.to_string()),
            ));
        }
        OnlineClient::from_insecure_url(config, url).await
    }

    /// Construct a new [`OnlineClient`], providing a URL to connect to.
    ///
    /// Allows insecure URLs without SSL encryption, e.g. (http:// and ws:// URLs).
    #[cfg(feature = "jsonrpsee")]
    pub async fn from_insecure_url(
        config: T,
        url: impl AsRef<str>,
    ) -> Result<OnlineClient<T>, OnlineClientError> {
        let rpc_client = RpcClient::from_insecure_url(url).await?;
        Ok(OnlineClient::from_rpc_client(config, rpc_client))
    }

    // A URL is acceptable if its scheme is encrypted (https/wss) or it
    // targets localhost (domain "localhost" or a loopback IP).
    fn is_url_secure(url: &url::Url) -> bool {
        let secure_scheme = url.scheme() == "https" || url.scheme() == "wss";
        let is_localhost = url.host().is_some_and(|e| match e {
            url::Host::Domain(e) => e == "localhost",
            url::Host::Ipv4(e) => e.is_loopback(),
            url::Host::Ipv6(e) => e.is_loopback(),
        });
        secure_scheme || is_localhost
    }

    /// Construct a new [`OnlineClient`] by providing an [`RpcClient`] to drive the connection.
    /// This will use the current default [`Backend`], which may change in future releases.
    #[cfg(feature = "jsonrpsee")]
    pub fn from_rpc_client(
        config: T,
        rpc_client: impl Into<RpcClient>,
    ) -> OnlineClient<T> {
        let rpc_client = rpc_client.into();
        // NOTE(review): `LegacyBackend` is not imported in this module as
        // written — presumably it should come from the new `backend` module.
        // TODO confirm before this compiles.
        let backend = Arc::new(LegacyBackend::builder().build(rpc_client));
        OnlineClient::from_backend(config, backend)
    }

    /// Construct a new [`OnlineClient`] by providing an underlying [`Backend`]
    /// implementation to power it.
    // NOTE(review): the `B: Backend<T>` type parameter is never used (the
    // `impl Into<Arc<dyn Backend<T>>>` argument is what's consumed); consider
    // dropping it before this API is published.
    pub fn from_backend<B: Backend<T>>(
        config: T,
        backend: impl Into<Arc<dyn Backend<T>>>,
    ) -> OnlineClient<T> {
        OnlineClient {
            inner: Arc::new(OnlineClientInner {
                config,
                backend: backend.into()
            })
        }
    }

    /// Pick the block height at which to operate. This references data from the
    /// [`OnlineClient`] it's called on, and so cannot outlive it.
    pub async fn at_block(
        &self,
        block_number: u32,
    ) -> Result<ClientAtBlock<OnlineClientAtBlock<T>, T>, OnlineClientAtBlockError> {
        let config = &self.inner.config;
        // NOTE(review): `OnlineClientInner` holds `backend`, not `rpc_methods`;
        // this method still assumes direct access to the RPC methods and needs
        // migrating to the backend API. TODO confirm intent.
        let rpc_methods = &self.inner.rpc_methods;
        // Resolve the block number to a hash. `pop()` takes the last of the
        // zero-or-more hashes returned for this height; none means no such block.
        let block_hash = rpc_methods
            .archive_v1_hash_by_height(block_number as usize)
            .await
            .map_err(|e| OnlineClientAtBlockError::CannotGetBlockHash {
                block_number,
                reason: e,
            })?
            .pop()
            .ok_or_else(|| OnlineClientAtBlockError::BlockNotFound { block_number })?
            .into();
        // Get our configuration, or fetch from the node if not available.
        let spec_version =
            if let Some(spec_version) = config.spec_version_for_block_number(block_number) {
                spec_version
            } else {
                // Fetch spec version. Caching this doesn't really make sense, so either
                // details are provided offline or we fetch them every time.
                get_spec_version(rpc_methods, block_hash).await?
            };
        let metadata = if let Some(metadata) = config.metadata_for_spec_version(spec_version) {
            metadata
        } else {
            // Fetch and then give our config the opportunity to cache this metadata.
            let metadata = get_metadata(rpc_methods, block_hash).await?;
            let metadata = Arc::new(metadata);
            config.set_metadata_for_spec_version(spec_version, metadata.clone());
            metadata
        };
        let mut historic_types = config.legacy_types_for_spec_version(spec_version);
        // The metadata can be used to construct call and event types instead of us having to hardcode them all for every spec version:
        let types_from_metadata = frame_decode::helpers::type_registry_from_metadata_any(&metadata)
            .map_err(
                |parse_error| OnlineClientAtBlockError::CannotInjectMetadataTypes { parse_error },
            )?;
        // NOTE(review): `legacy_types_for_spec_version` returns an `Option`, so
        // `prepend` won't resolve here, and the fields passed below (`config`,
        // `historic_types`, `rpc_methods`) don't match the `OnlineClientAtBlock`
        // struct definition (`metadata`, `backend`, `hasher`, `block_hash`).
        // WIP — one side needs updating.
        historic_types.prepend(types_from_metadata);
        Ok(ClientAtBlock::new(OnlineClientAtBlock {
            config,
            historic_types,
            metadata,
            rpc_methods,
            block_hash,
        }))
    }
}
/// This represents an online client at a specific block. It extends the
/// offline trait with access to the node-facing backend and the block hash.
#[doc(hidden)]
pub trait OnlineClientAtBlockT<T: Config>: OfflineClientAtBlockT
{
    /// Return the backend we'll use to interact with the node.
    fn backend(&self) -> &dyn Backend<T>;

    /// Return the block hash for the current block.
    fn block_hash(&self) -> HashFor<T>;
}
// Dev note: this shouldn't need to be exposed unless there is some
// need to explicitly name the ClientAtBlock type. Rather keep it
// private to allow changes if possible.
#[doc(hidden)]
pub struct OnlineClientAtBlock<T: Config> {
    // Metadata resolved for this block's spec version.
    metadata: Arc<Metadata>,
    // Backend used for further node interaction at this block.
    backend: Arc<dyn Backend<T>>,
    // NOTE(review): `at_block` constructs this struct with different fields
    // (`config`, `historic_types`, `rpc_methods`) — the two need reconciling
    // before this compiles.
    hasher: T::Hasher,
    // Hash of the block this client is pinned to.
    block_hash: HashFor<T>,
}
impl<T: Config> OnlineClientAtBlockT<T> for OnlineClientAtBlock<T> {
fn backend(&self) -> &dyn Backend<T> {
&*self.backend
}
fn block_hash(&self) -> HashFor<T> {
self.block_hash
}
}
impl<T: Config> OfflineClientAtBlockT for OnlineClientAtBlock<T> {
fn metadata(&self) -> &Metadata {
&self.metadata
}
}
/// Fetch the runtime spec version in effect at the given block, by calling
/// the `Core_version` runtime API and decoding just enough of the response.
async fn get_spec_version<T: Config>(
    rpc_methods: &ChainHeadRpcMethods<RpcConfigFor<T>>,
    block_hash: HashFor<T>,
) -> Result<u32, OnlineClientAtBlockError> {
    use codec::Decode;
    use subxt_rpcs::methods::chain_head::ArchiveCallResult;

    // We only care about the spec version, so decode just the leading fields
    // of the version information and ignore the rest of the bytes.
    #[derive(codec::Decode)]
    struct SpecVersionHeader {
        _spec_name: String,
        _impl_name: String,
        _authoring_version: u32,
        spec_version: u32,
    }

    // Make a runtime call to get the version information. This is also a constant
    // in the metadata and so we could fetch it from there to avoid the call, but it
    // would be a bit more effort.
    let call_res = rpc_methods
        .archive_v1_call(block_hash.into(), "Core_version", &[])
        .await
        .map_err(|e| OnlineClientAtBlockError::CannotGetSpecVersion {
            block_hash: block_hash.to_string(),
            reason: format!("Error calling Core_version: {e}"),
        })?;

    let spec_version_bytes = match call_res {
        ArchiveCallResult::Success(bytes) => bytes.0,
        ArchiveCallResult::Error(e) => {
            return Err(OnlineClientAtBlockError::CannotGetSpecVersion {
                block_hash: block_hash.to_string(),
                reason: format!("Core_version returned an error: {e}"),
            });
        }
    };

    let header = SpecVersionHeader::decode(&mut &spec_version_bytes[..]).map_err(|e| {
        OnlineClientAtBlockError::CannotGetSpecVersion {
            block_hash: block_hash.to_string(),
            reason: format!("Error decoding Core_version response: {e}"),
        }
    })?;

    Ok(header.spec_version)
}
/// Fetch the runtime metadata for the given block.
///
/// Tries the "modern" `Metadata_metadata_versions` / `Metadata_metadata_at_version`
/// runtime APIs first, falling back to the original `Metadata_metadata` call if no
/// version list could be obtained.
async fn get_metadata<T: Config>(
    rpc_methods: &ChainHeadRpcMethods<RpcConfigFor<T>>,
    block_hash: HashFor<T>,
) -> Result<RuntimeMetadata, OnlineClientAtBlockError> {
    // First, try to use the "modern" metadata APIs to get the most recent version we can.
    // Any failure along this chain (call error, runtime error, undecodable response)
    // yields `None` and sends us down the legacy path below.
    let version_to_get = rpc_methods
        .archive_v1_call(block_hash.into(), "Metadata_metadata_versions", &[])
        .await
        .ok()
        .and_then(|res| res.as_success())
        .and_then(|res| <Vec<u32>>::decode(&mut &res[..]).ok())
        .and_then(|versions| {
            // We want to filter out the "unstable" version, which is represented by u32::MAX.
            versions.into_iter().filter(|v| *v != u32::MAX).max()
        });

    // We had success calling the above API, so we expect the "modern" metadata API to work.
    if let Some(version_to_get) = version_to_get {
        // The requested version is SCALE-encoded as the call argument.
        let version_bytes = version_to_get.encode();
        let rpc_response = rpc_methods
            .archive_v1_call(
                block_hash.into(),
                "Metadata_metadata_at_version",
                &version_bytes,
            )
            .await
            .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata {
                block_hash: block_hash.to_string(),
                reason: format!("Error calling Metadata_metadata_at_version: {e}"),
            })
            .and_then(|res| match res {
                ArchiveCallResult::Success(bytes) => Ok(bytes.0),
                ArchiveCallResult::Error(e) => Err(OnlineClientAtBlockError::CannotGetMetadata {
                    block_hash: block_hash.to_string(),
                    reason: format!("Calling Metadata_metadata_at_version returned an error: {e}"),
                }),
            })?;

        // Option because we may have asked for a version that doesn't exist. Compact because we get back a Vec<u8>
        // of the metadata bytes, and the Vec is preceded by its compact encoded length. The actual bytes are then
        // decoded as a `RuntimeMetadataPrefixed`, after this.
        let (_, metadata) = <Option<(Compact<u32>, RuntimeMetadataPrefixed)>>::decode(&mut &rpc_response[..])
            .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata {
                block_hash: block_hash.to_string(),
                reason: format!("Error decoding response for Metadata_metadata_at_version: {e}"),
            })?
            .ok_or_else(|| OnlineClientAtBlockError::CannotGetMetadata {
                block_hash: block_hash.to_string(),
                reason: format!("No metadata returned for the latest version from Metadata_metadata_versions ({version_to_get})"),
            })?;

        // `.1` of the prefixed type is the `RuntimeMetadata` we want; `.0` is the prefix.
        return Ok(metadata.1);
    }

    // We didn't get a version from Metadata_metadata_versions, so fall back to the "old" API.
    let metadata_bytes = rpc_methods
        .archive_v1_call(block_hash.into(), "Metadata_metadata", &[])
        .await
        .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata {
            block_hash: block_hash.to_string(),
            reason: format!("Error calling Metadata_metadata: {e}"),
        })
        .and_then(|res| match res {
            ArchiveCallResult::Success(bytes) => Ok(bytes.0),
            ArchiveCallResult::Error(e) => Err(OnlineClientAtBlockError::CannotGetMetadata {
                block_hash: block_hash.to_string(),
                reason: format!("Calling Metadata_metadata returned an error: {e}"),
            }),
        })?;

    // The legacy response is likewise a length-prefixed Vec<u8> holding a
    // `RuntimeMetadataPrefixed` (no Option here: the call has no version argument).
    let (_, metadata) = <(Compact<u32>, RuntimeMetadataPrefixed)>::decode(&mut &metadata_bytes[..])
        .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata {
            block_hash: block_hash.to_string(),
            reason: format!("Error decoding response for Metadata_metadata: {e}"),
        })?;
    Ok(metadata.1)
}
+17 -9
View File
@@ -21,7 +21,7 @@ use scale_decode::DecodeAsType;
use scale_encode::EncodeAsType;
use serde::{Serialize, de::DeserializeOwned};
use subxt_metadata::Metadata;
use std::{marker::PhantomData, sync::Arc};
use std::{fmt::Display, marker::PhantomData, sync::Arc};
use scale_info_legacy::TypeRegistrySet;
use subxt_rpcs::RpcConfig;
@@ -63,7 +63,9 @@ pub trait Config: Clone + Debug + Sized + Send + Sync + 'static {
///
/// The [`crate::client::OnlineClient`] will look this up on chain if it's not available here,
/// but the [`crate::client::OfflineClient`] will error if this is not available for the required block number.
fn spec_version_for_block_number(&self, block_number: u32) -> Option<u32>;
fn spec_version_for_block_number(&self, _block_number: u32) -> Option<u32> {
None
}
/// Return the metadata for a given spec version, if available.
///
@@ -72,17 +74,19 @@ pub trait Config: Clone + Debug + Sized + Send + Sync + 'static {
/// The [`crate::client::OfflineClient`] will error if this is not available for the required spec version.
fn metadata_for_spec_version(
&self,
spec_version: u32,
) -> Option<Arc<Metadata>>;
_spec_version: u32,
) -> Option<Arc<Metadata>> {
None
}
/// Set some metadata for a given spec version. the [`crate::client::OnlineClient`] will call this if it has
/// to retrieve metadata from the chain, to give this the opportunity to cache it. The configuration can
/// do nothing if it prefers.
fn set_metadata_for_spec_version(
&self,
spec_version: u32,
metadata: Arc<Metadata>,
);
_spec_version: u32,
_metadata: Arc<Metadata>,
) {}
/// Return legacy types (ie types to use with Runtimes that return pre-V14 metadata) for a given spec version.
/// If this returns `None`, [`subxt`] will return an error if type definitions are needed to access some older
@@ -92,8 +96,10 @@ pub trait Config: Clone + Debug + Sized + Send + Sync + 'static {
/// into our [`Metadata`] type, which will then be used.
fn legacy_types_for_spec_version<'this>(
&'this self,
spec_version: u32,
) -> Option<TypeRegistrySet<'this>>;
_spec_version: u32,
) -> Option<TypeRegistrySet<'this>> {
None
}
}
/// `RpcConfigFor<Config>` can be used anywhere which requires an implementation of [`subxt_rpcs::RpcConfig`].
@@ -117,6 +123,7 @@ pub type ParamsFor<T> = <<T as Config>::ExtrinsicParams as ExtrinsicParams<T>>::
/// Block hashes must conform to a bunch of things to be used in Subxt.
pub trait Hash:
Debug
+ Display
+ Copy
+ Send
+ Sync
@@ -132,6 +139,7 @@ pub trait Hash:
}
impl<T> Hash for T where
T: Debug
+ Display
+ Copy
+ Send
+ Sync
+12
View File
@@ -22,6 +22,7 @@ pub struct SubstrateConfigBuilder {
legacy_types: Option<ChainTypeRegistry>,
spec_version_for_block_number: RangeMap<u32, u32>,
metadata_for_spec_version: Mutex<HashMap<u32, Arc<Metadata>>>,
use_old_v9_hashers_before_spec_version: u32,
}
impl Default for SubstrateConfigBuilder {
@@ -37,6 +38,7 @@ impl SubstrateConfigBuilder {
legacy_types: None,
spec_version_for_block_number: RangeMap::empty(),
metadata_for_spec_version: Mutex::new(HashMap::new()),
use_old_v9_hashers_before_spec_version: 0,
}
}
@@ -77,6 +79,16 @@ impl SubstrateConfigBuilder {
self
}
/// The storage hasher encoding/decoding changed during V9 metadata. By default we
/// support the "new" version of things; call this to use the old behaviour for any
/// runtime whose spec version is below the one given.
pub fn use_old_v9_hashers_before_spec_version(mut self, spec_version: u32) -> Self {
    // Record the cutoff; spec versions below this use the old V9 hasher scheme.
    self.use_old_v9_hashers_before_spec_version = spec_version;
    self
}
/// Construct the [`SubstrateConfig`] from this builder.
pub fn build(self) -> SubstrateConfig {
SubstrateConfig {
+108 -32
View File
@@ -30,6 +30,12 @@ pub use subxt_metadata::TryFromError as MetadataTryFromError;
#[non_exhaustive]
#[allow(missing_docs)]
pub enum Error {
#[error(transparent)]
OnlineClientError(#[from] OnlineClientError),
#[error(transparent)]
OfflineClientAtBlockError(#[from] OfflineClientAtBlockError),
#[error(transparent)]
OnlineClientAtBlockError(#[from] OnlineClientAtBlockError),
#[error(transparent)]
ExtrinsicDecodeErrorAt(#[from] ExtrinsicDecodeErrorAt),
#[error(transparent)]
@@ -47,8 +53,6 @@ pub enum Error {
#[error(transparent)]
AccountNonceError(#[from] AccountNonceError),
#[error(transparent)]
OnlineClientError(#[from] OnlineClientError),
#[error(transparent)]
RuntimeUpdaterError(#[from] RuntimeUpdaterError),
#[error(transparent)]
RuntimeUpdateeApplyError(#[from] RuntimeUpdateeApplyError),
@@ -156,6 +160,108 @@ impl Error {
}
}
/// Errors constructing an offline client at a specific block number.
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum OfflineClientAtBlockError {
    // Returned by `OfflineClient::at` when the config has no spec version
    // registered that covers the requested block number.
    #[error(
        "Cannot construct OfflineClientAtBlock: spec version not found for block number {block_number}"
    )]
    SpecVersionNotFound {
        /// The block number for which the spec version was not found.
        block_number: u32,
    },
    // Returned by `OfflineClient::at` when the config has no metadata
    // registered for the resolved spec version.
    #[error(
        "Cannot construct OfflineClientAtBlock: metadata not found for spec version {spec_version}"
    )]
    MetadataNotFound {
        /// The spec version for which the metadata was not found.
        spec_version: u32,
    },
}
/// Errors that can occur when constructing an `OnlineClient`.
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
#[allow(missing_docs)]
pub enum OnlineClientError {
    // The URL handed to `from_url` failed to parse.
    #[error("Cannot construct OnlineClient: The URL provided is invalid: {url}")]
    InvalidUrl {
        /// The URL that was invalid.
        url: String,
    },
    // Any error surfaced by the underlying RPC client (including refusing
    // an insecure URL).
    #[error("Cannot construct OnlineClient: {0}")]
    RpcError(#[from] subxt_rpcs::Error),
    #[error(
        "Cannot construct OnlineClient: Cannot fetch latest finalized block to obtain init details from: {0}"
    )]
    CannotGetLatestFinalizedBlock(BackendError),
    #[error("Cannot construct OnlineClient: Cannot fetch genesis hash: {0}")]
    CannotGetGenesisHash(BackendError),
    #[error("Cannot construct OnlineClient: Cannot fetch current runtime version: {0}")]
    CannotGetCurrentRuntimeVersion(BackendError),
    #[error("Cannot construct OnlineClient: Cannot fetch metadata: {0}")]
    CannotFetchMetadata(BackendError),
}
impl OnlineClientError {
    // Return the underlying `BackendError`, if this variant wraps one
    // (the fetch-related variants do; `InvalidUrl`/`RpcError` don't).
    fn backend_error(&self) -> Option<&BackendError> {
        match self {
            OnlineClientError::CannotGetLatestFinalizedBlock(e)
            | OnlineClientError::CannotGetGenesisHash(e)
            | OnlineClientError::CannotGetCurrentRuntimeVersion(e)
            | OnlineClientError::CannotFetchMetadata(e) => Some(e),
            _ => None,
        }
    }
}
/// Errors constructing an online client at a specific block number.
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum OnlineClientAtBlockError {
    #[error(
        "Cannot construct OnlineClientAtBlock: failed to get block hash from node for block {block_number}: {reason}"
    )]
    CannotGetBlockHash {
        /// Block number we failed to get the hash for.
        // u32, matching `OfflineClientAtBlockError` and the `at_block` caller,
        // which works with u32 block numbers throughout (u64 here would fail
        // to compile at the construction site, since Rust doesn't widen
        // integers implicitly).
        block_number: u32,
        /// The error we encountered.
        reason: subxt_rpcs::Error,
    },
    #[error("Cannot construct OnlineClientAtBlock: block number {block_number} not found")]
    BlockNotFound {
        /// The block number for which a block was not found.
        block_number: u32,
    },
    #[error(
        "Cannot construct OnlineClientAtBlock: failed to get spec version for block hash {block_hash}: {reason}"
    )]
    CannotGetSpecVersion {
        /// The block hash for which we failed to get the spec version.
        block_hash: String,
        /// The error we encountered.
        reason: String,
    },
    #[error(
        "Cannot construct OnlineClientAtBlock: failed to get metadata for block hash {block_hash}: {reason}"
    )]
    CannotGetMetadata {
        /// The block hash for which we failed to get the metadata.
        block_hash: String,
        /// The error we encountered.
        reason: String,
    },
    #[error(
        "Cannot inject types from metadata: failure to parse a type found in the metadata: {parse_error}"
    )]
    CannotInjectMetadataTypes {
        /// Error parsing a type found in the metadata.
        parse_error: scale_info_legacy::lookup_name::ParseError,
    },
}
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
#[allow(missing_docs)]
@@ -277,36 +383,6 @@ impl AccountNonceError {
}
}
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
#[allow(missing_docs)]
pub enum OnlineClientError {
#[error("Cannot construct OnlineClient: {0}")]
RpcError(#[from] subxt_rpcs::Error),
#[error(
"Cannot construct OnlineClient: Cannot fetch latest finalized block to obtain init details from: {0}"
)]
CannotGetLatestFinalizedBlock(BackendError),
#[error("Cannot construct OnlineClient: Cannot fetch genesis hash: {0}")]
CannotGetGenesisHash(BackendError),
#[error("Cannot construct OnlineClient: Cannot fetch current runtime version: {0}")]
CannotGetCurrentRuntimeVersion(BackendError),
#[error("Cannot construct OnlineClient: Cannot fetch metadata: {0}")]
CannotFetchMetadata(BackendError),
}
impl OnlineClientError {
fn backend_error(&self) -> Option<&BackendError> {
match self {
OnlineClientError::CannotGetLatestFinalizedBlock(e)
| OnlineClientError::CannotGetGenesisHash(e)
| OnlineClientError::CannotGetCurrentRuntimeVersion(e)
| OnlineClientError::CannotFetchMetadata(e) => Some(e),
_ => None,
}
}
}
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
#[allow(missing_docs)]
+1 -1
View File
@@ -36,8 +36,8 @@ pub mod config;
pub mod client;
pub mod error;
pub mod utils;
pub mod backend;
// pub mod book;
// pub mod backend;
// pub mod blocks;
// pub mod constants;
// pub mod custom_values;
+27 -139
View File
@@ -15,9 +15,9 @@ use serde::{Deserialize, Deserializer, Serialize};
use std::collections::{HashMap, VecDeque};
use std::task::Poll;
/// An interface to call the unstable RPC methods. This interface is instantiated with
/// some `T: Config` trait which determines some of the types that the RPC methods will
/// take or hand back.
/// An interface to call the new ["chainHead" RPC methods](https://paritytech.github.io/json-rpc-interface-spec/).
/// This interface is instantiated with some `T: RpcConfig` trait which determines some of the types that
/// the RPC methods will take or hand back.
#[derive_where(Clone, Debug)]
pub struct ChainHeadRpcMethods<T> {
client: RpcClient,
@@ -386,14 +386,15 @@ impl<T: RpcConfig> ChainHeadRpcMethods<T> {
pub async fn archive_v1_storage(
&self,
block_hash: T::Hash,
items: impl IntoIterator<Item = StorageQuery<&[u8]>>,
items: impl IntoIterator<Item = ArchiveStorageQuery<&[u8]>>,
child_key: Option<&[u8]>,
) -> Result<ArchiveStorageSubscription<T::Hash>, Error> {
let items: Vec<StorageQuery<String>> = items
let items: Vec<ArchiveStorageQuery<String>> = items
.into_iter()
.map(|item| StorageQuery {
.map(|item| ArchiveStorageQuery {
key: to_hex(item.key),
query_type: item.query_type,
pagination_start_key: item.pagination_start_key.map(|k| to_hex(k)),
})
.collect();
@@ -408,137 +409,6 @@ impl<T: RpcConfig> ChainHeadRpcMethods<T> {
Ok(ArchiveStorageSubscription { sub, done: false })
}
// Dev note: we continue to support the latest "unstable" archive methods because
// they will be around for a while before the stable ones make it into a release.
// The below are just a copy-paste of the v1 methods, above, but calling the
// "unstable" RPCs instead. Eventually we'll remove them.
/// Fetch the block body (ie the extrinsics in the block) given its hash.
///
/// Returns an array of the hexadecimal-encoded scale-encoded extrinsics found in the block,
/// or `None` if the block wasn't found.
pub async fn archive_unstable_body(
&self,
block_hash: T::Hash,
) -> Result<Option<Vec<Bytes>>, Error> {
self.client
.request("archive_unstable_body", rpc_params![block_hash])
.await
}
/// Call the `archive_unstable_call` method and return the response.
pub async fn archive_unstable_call(
&self,
block_hash: T::Hash,
function: &str,
call_parameters: &[u8],
) -> Result<ArchiveCallResult, Error> {
use serde::de::Error as _;
// We deserialize to this intermediate shape, since
// we can't have a boolean tag to denote variants.
#[derive(Deserialize)]
struct Response {
success: bool,
value: Option<Bytes>,
error: Option<String>,
// This was accidentally used instead of value in Substrate,
// so to support those impls we try it here if needed:
result: Option<Bytes>,
}
let res: Response = self
.client
.request(
"archive_unstable_call",
rpc_params![block_hash, function, to_hex(call_parameters)],
)
.await?;
let value = res.value.or(res.result);
match (res.success, value, res.error) {
(true, Some(value), _) => Ok(ArchiveCallResult::Success(value)),
(false, _, err) => Ok(ArchiveCallResult::Error(err.unwrap_or(String::new()))),
(true, None, _) => {
let m = "archive_unstable_call: 'success: true' response should have `value: 0x1234` alongside it";
Err(Error::Deserialization(serde_json::Error::custom(m)))
}
}
}
/// Return the finalized block height of the chain.
pub async fn archive_unstable_finalized_height(&self) -> Result<usize, Error> {
    let params = rpc_params![];
    self.client
        .request("archive_unstable_finalizedHeight", params)
        .await
}
/// Return the genesis hash.
pub async fn archive_unstable_genesis_hash(&self) -> Result<T::Hash, Error> {
    let params = rpc_params![];
    self.client
        .request("archive_unstable_genesisHash", params)
        .await
}
/// Given a block height, return the hashes of the zero or more blocks at that height.
/// For blocks older than the latest finalized block, only one entry will be returned. For blocks
/// newer than the latest finalized block, it's possible to have 0, 1 or multiple blocks at
/// that height given that forks could occur.
pub async fn archive_unstable_hash_by_height(
    &self,
    height: usize,
) -> Result<Vec<T::Hash>, Error> {
    let params = rpc_params![height];
    self.client
        .request("archive_unstable_hashByHeight", params)
        .await
}
/// Fetch the header for a block with the given hash, or `None` if no block with that hash exists.
pub async fn archive_unstable_header(
&self,
block_hash: T::Hash,
) -> Result<Option<T::Header>, Error> {
let maybe_encoded_header: Option<Bytes> = self
.client
.request("archive_unstable_header", rpc_params![block_hash])
.await?;
let Some(encoded_header) = maybe_encoded_header else {
return Ok(None);
};
let header =
<T::Header as codec::Decode>::decode(&mut &*encoded_header.0).map_err(Error::Decode)?;
Ok(Some(header))
}
/// Query the node storage and return a subscription which streams corresponding storage events back.
pub async fn archive_unstable_storage(
    &self,
    block_hash: T::Hash,
    items: impl IntoIterator<Item = StorageQuery<&[u8]>>,
    child_key: Option<&[u8]>,
) -> Result<ArchiveStorageSubscription<T::Hash>, Error> {
    // Hex-encode each storage key so the query can travel over JSON-RPC.
    let queries: Vec<StorageQuery<String>> = items
        .into_iter()
        .map(|StorageQuery { key, query_type }| StorageQuery {
            key: to_hex(key),
            query_type,
        })
        .collect();

    let sub = self
        .client
        .subscribe(
            "archive_unstable_storage",
            rpc_params![block_hash, queries, child_key.map(to_hex)],
            "archive_unstable_stopStorage",
        )
        .await?;

    Ok(ArchiveStorageSubscription { sub, done: false })
}
}
/// This represents events generated by the `follow` method.
@@ -849,6 +719,24 @@ pub struct StorageQuery<Key> {
pub query_type: StorageQueryType,
}
/// The storage item received as parameter. This is used for archive storage queries, and
/// unlike [`StorageQuery`] also contains `paginationStartKey` to define where iteration
/// should begin.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ArchiveStorageQuery<Key> {
    /// The provided key.
    pub key: Key,
    /// The type of the storage query.
    #[serde(rename = "type")]
    pub query_type: StorageQueryType,
    /// This parameter is optional and should be a string containing the hexadecimal-encoded key
    /// from which the storage iteration should resume. This parameter is only valid in the context
    /// of `descendantsValues` and `descendantsHashes`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub pagination_start_key: Option<Key>,
}
/// The type of the storage query.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
@@ -1104,7 +992,7 @@ impl<Hash> ArchiveStorageEvent<Hash> {
}
}
/// Something went wrong during the [`ChainHeadRpcMethods::archive_unstable_storage()`] subscription.
/// Something went wrong during the [`ChainHeadRpcMethods::archive_v1_storage()`] subscription.
#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ArchiveStorageEventError {
@@ -1112,7 +1000,7 @@ pub struct ArchiveStorageEventError {
pub error: String,
}
/// A storage item returned from the [`ChainHeadRpcMethods::archive_unstable_storage()`] subscription.
/// A storage item returned from the [`ChainHeadRpcMethods::archive_v1_storage()`] subscription.
#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ArchiveStorageEventItem<Hash> {
+1 -1
View File
@@ -13,7 +13,7 @@ use primitive_types::U256;
use serde::{Deserialize, Serialize};
/// An interface to call the legacy RPC methods. This interface is instantiated with
/// some `T: Config` trait which determines some of the types that the RPC methods will
/// some `T: RpcConfig` trait which determines some of the types that the RPC methods will
/// take or hand back.
#[derive_where(Clone, Debug)]
pub struct LegacyRpcMethods<T> {