Add subxt-historic crate for accessing historic (non head-of-chain) blocks (#2040)

* WIP subxt-historic

* WIP subxt-historic

* WIP subxt-historic; flesh out basic foundations

* WIP filling in extrinsic decoding functionality

* iter and decode transaction extensions

* Fill in the Online/OfflineClient APIs and move more things to be part of the chain Config

* WIP storage

* clippy, fmt, finish extrinsics example

* prep for 0.0.1 release to claim crate name

* fix README link

* fmt

* WIP thinking about storage APIs

* WIP working out storage APIs

* Storage plain value fetching first pass

* WIP storage: first pass iterating over values done

* First pass finishing storage APIs

* fmt and clippy

* Create a storage example showing fetch and iteration

* Bump to frame-decode 0.9.0

* Bump subxt-historic to 0.0.3 for preview release

* Remove unused deps

* fix import

* clippy

* doc fixes

* tweak CI and fix some cargo hack findings

* Update README: subxt-historic is prerelease
This commit is contained in:
James Wilson
2025-08-26 17:56:21 +01:00
committed by GitHub
parent 23f3ebe6b5
commit 3aabd6dc09
33 changed files with 3220 additions and 55 deletions
+4 -1
View File
@@ -187,6 +187,9 @@ jobs:
- name: Cargo hack; check each subxt feature
run: cargo hack -p subxt --each-feature check --exclude-no-default-features --exclude-all-features --exclude-features web --features native
- name: Cargo hack; check each subxt-historic feature
run: cargo hack -p subxt-historic --each-feature check --exclude-no-default-features --exclude-all-features --exclude-features web --features native
# Subxt-signer has the "subxt" features enabled in the "check all targets" test. Run it on its own to
# check it without. We can't enable subxt or web features here, so no cargo hack.
- name: Cargo check subxt-signer
@@ -213,7 +216,7 @@ jobs:
# Next, check each other package in isolation.
- name: Cargo hack; check each feature/crate on its own
run: cargo hack --exclude subxt --exclude subxt-signer --exclude subxt-lightclient --exclude subxt-rpcs --exclude-all-features --each-feature check --workspace
run: cargo hack --exclude subxt --exclude subxt-historic --exclude subxt-signer --exclude subxt-lightclient --exclude subxt-rpcs --exclude-all-features --each-feature check --workspace
# Check the full examples, which aren't a part of the workspace so are otherwise ignored.
- name: Cargo check parachain-example
Generated
+73 -2
View File
@@ -1931,16 +1931,20 @@ dependencies = [
[[package]]
name = "frame-decode"
version = "0.8.0"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1276c23a1fb234d9f81b5f71c526437f2a55ab4419f29bfe1196ac4ee2f706c"
checksum = "c470df86cf28818dd3cd2fc4667b80dbefe2236c722c3dc1d09e7c6c82d6dfcd"
dependencies = [
"frame-metadata 23.0.0",
"parity-scale-codec",
"scale-decode",
"scale-encode",
"scale-info",
"scale-info-legacy",
"scale-type-resolver",
"serde_yaml",
"sp-crypto-hashing",
"thiserror 2.0.12",
]
[[package]]
@@ -4447,6 +4451,21 @@ dependencies = [
"syn 2.0.101",
]
[[package]]
name = "scale-info-legacy"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f5da3f59983b08a37d8d979d2326bdc00e8cca57b3d28fb05bdc0f6d7c28600c"
dependencies = [
"hashbrown 0.15.3",
"scale-type-resolver",
"serde",
"smallstr",
"smallvec",
"thiserror 2.0.12",
"yap",
]
[[package]]
name = "scale-type-resolver"
version = "0.2.0"
@@ -4744,6 +4763,19 @@ dependencies = [
"serde",
]
[[package]]
name = "serde_yaml"
version = "0.9.34+deprecated"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47"
dependencies = [
"indexmap 2.9.0",
"itoa",
"ryu",
"serde",
"unsafe-libyaml",
]
[[package]]
name = "serdect"
version = "0.2.0"
@@ -4860,6 +4892,15 @@ version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "826167069c09b99d56f31e9ae5c99049e932a98c9dc2dac47645b08dbbf76ba7"
[[package]]
name = "smallstr"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "63b1aefdf380735ff8ded0b15f31aab05daf1f70216c01c02a12926badd1df9d"
dependencies = [
"smallvec",
]
[[package]]
name = "smallvec"
version = "1.15.0"
@@ -5670,6 +5711,28 @@ dependencies = [
"tracing",
]
[[package]]
name = "subxt-historic"
version = "0.0.3"
dependencies = [
"frame-decode",
"frame-metadata 23.0.0",
"futures",
"hex",
"parity-scale-codec",
"primitive-types",
"scale-decode",
"scale-info",
"scale-info-legacy",
"scale-type-resolver",
"scale-value",
"sp-crypto-hashing",
"subxt-rpcs",
"thiserror 2.0.12",
"tokio",
"url",
]
[[package]]
name = "subxt-lightclient"
version = "0.43.0"
@@ -6048,7 +6111,9 @@ dependencies = [
"bytes",
"libc",
"mio",
"parking_lot",
"pin-project-lite",
"signal-hook-registry",
"socket2",
"tokio-macros",
"windows-sys 0.52.0",
@@ -6399,6 +6464,12 @@ dependencies = [
"subtle",
]
[[package]]
name = "unsafe-libyaml"
version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861"
[[package]]
name = "untrusted"
version = "0.9.0"
+5 -2
View File
@@ -4,6 +4,7 @@ members = [
"codegen",
"core",
"lightclient",
"historic",
"testing/substrate-runner",
"testing/test-runtime",
"testing/integration-tests",
@@ -80,8 +81,8 @@ darling = "0.20.10"
derive-where = "1.2.7"
either = { version = "1.13.0", default-features = false }
finito = { version = "0.1.0", default-features = false }
frame-decode = { version = "0.8.0", default-features = false }
frame-metadata = { version = "23.0.0", default-features = false, features = ["unstable"] }
frame-decode = { version = "0.9.0", default-features = false }
frame-metadata = { version = "23.0.0", default-features = false }
futures = { version = "0.3.31", default-features = false, features = ["std"] }
getrandom = { version = "0.2", default-features = false }
hashbrown = "0.14.5"
@@ -101,6 +102,8 @@ scale-value = { version = "0.18.0", default-features = false }
scale-bits = { version = "0.7.0", default-features = false }
scale-decode = { version = "0.16.0", default-features = false }
scale-encode = { version = "0.10.0", default-features = false }
scale-type-resolver = { version = "0.2.0" }
scale-info-legacy = { version = "0.2.3" }
scale-typegen = "0.11.1"
scale-typegen-description = "0.11.0"
serde = { version = "1.0.210", default-features = false, features = ["derive"] }
+63
View File
@@ -0,0 +1,63 @@
[package]
name = "subxt-historic"
version = "0.0.3"
authors.workspace = true
edition.workspace = true
rust-version.workspace = true
publish = true
license.workspace = true
readme = "README.md"
repository.workspace = true
documentation.workspace = true
homepage.workspace = true
description = "Download non head-of-chain blocks and state from Substrate based nodes"
keywords = ["parity", "substrate", "blockchain"]
[lints]
workspace = true
[features]
default = ["jsonrpsee", "native"]
# Enable this for native (ie non web/wasm builds).
# Exactly 1 of "web" and "native" is expected.
native = [
"subxt-rpcs/native",
]
# Enable this for web/wasm builds.
# Exactly 1 of "web" and "native" is expected.
web = [
"subxt-rpcs/web",
]
# Enable this to use the reconnecting rpc client
reconnecting-rpc-client = ["subxt-rpcs/reconnecting-rpc-client"]
# Enable this to use jsonrpsee, which enables the jsonrpsee RPC client, and
# a couple of util functions which rely on jsonrpsee.
jsonrpsee = [
"subxt-rpcs/jsonrpsee",
]
[dependencies]
subxt-rpcs = { workspace = true }
frame-decode = { workspace = true, features = ["legacy", "legacy-types"] }
frame-metadata = { workspace = true, features = ["std", "legacy"] }
scale-type-resolver = { workspace = true }
codec = { workspace = true }
primitive-types = { workspace = true }
scale-info = { workspace = true }
scale-info-legacy = { workspace = true }
scale-decode = { workspace = true }
thiserror = { workspace = true }
sp-crypto-hashing = { workspace = true }
url = { workspace = true }
futures = { workspace = true }
[dev-dependencies]
tokio = { workspace = true, features = ["full"] }
scale-value = { workspace = true }
scale-decode = { workspace = true, features = ["derive"] }
hex = { workspace = true }
+16
View File
@@ -0,0 +1,16 @@
# subxt-historic
**This crate is a work in progress and currently is released only as a preview.**
While `subxt` is a library for working at the head of a chain (submitting transactions and obtaining the current state), `subxt-historic` is a library for decoding blocks and state that are anywhere in a chain. To broadly summarize the differences:
| Feature | subxt | subxt-historic |
|-----------------------------------------|------------------------------|-------------------------------|
| Block access | Head of chain | Any block in chain |
| Connection to chain | Light client or RPC node | Archive RPC nodes only |
| Transaction submission | Yes | No |
| Metadata compatibility | V14 and newer | Any version |
# Examples
See the [examples](https://github.com/paritytech/subxt/tree/master/historic/examples) folder for examples of how to use `subxt-historic`.
+83
View File
@@ -0,0 +1,83 @@
#![allow(missing_docs)]
use subxt_historic::{Error, OnlineClient, PolkadotConfig};

// Example: fetch historic blocks from a Polkadot archive node and decode the
// extrinsics in each, printing call names, signatures, call data fields and
// transaction extensions.
#[tokio::main]
async fn main() -> Result<(), Error> {
    // Configuration for the Polkadot relay chain.
    let config = PolkadotConfig::new();
    // Create an online client for the Polkadot relay chain, pointed at a Polkadot archive node.
    let client = OnlineClient::from_url(config, "wss://rpc.polkadot.io").await?;
    // Iterate through some randomly selected old blocks to show how to fetch and decode extrinsics.
    // NOTE: this range is unbounded, so the example runs until it is interrupted or an error occurs.
    for block_number in 123456.. {
        println!("=== Block {block_number} ===");
        // Point the client at a specific block number. By default this will download and cache
        // metadata for the required spec version (so it's cheaper to instantiate again), if it
        // hasn't already, and borrow the relevant legacy types from the client.
        let client_at_block = client.at(block_number).await?;
        // Fetch the extrinsics at that block.
        let extrinsics = client_at_block.extrinsics().fetch().await?;
        // Now, we have various operations to work with them. Here we print out various details
        // about each extrinsic.
        for extrinsic in extrinsics.iter() {
            println!(
                "{}.{}",
                extrinsic.call().pallet_name(),
                extrinsic.call().name()
            );
            // Unsigned extrinsics (e.g. inherents) have no signature, hence the Option.
            if let Some(signature) = extrinsic.signature_bytes() {
                println!(" Signature: 0x{}", hex::encode(signature));
            }
            println!(" Call Data:");
            // We can decode each of the fields (in this example we decode everything into a
            // scale_value::Value type, which can represent any SCALE encoded data, but if you
            // have an idea of the type then you can try to decode into that type instead):
            // (unwrap is acceptable in example code; real code should propagate the error.)
            for field in extrinsic.call().fields().iter() {
                println!(
                    " {}: {}",
                    field.name(),
                    field.decode::<scale_value::Value>().unwrap()
                );
            }
            // Or, all of them at once:
            println!(
                " All: {}",
                extrinsic
                    .call()
                    .fields()
                    .decode::<scale_value::Composite<_>>()
                    .unwrap()
            );
            // We can also look at things like the transaction extensions:
            if let Some(extensions) = extrinsic.transaction_extensions() {
                println!(" Transaction Extensions:");
                // We can decode each of them:
                for extension in extensions.iter() {
                    println!(
                        " {}: {}",
                        extension.name(),
                        extension.decode::<scale_value::Value>().unwrap()
                    );
                }
                // Or all of them at once:
                println!(
                    " All: {}",
                    extensions.decode::<scale_value::Composite<_>>().unwrap()
                );
            }
        }
    }
    Ok(())
}
+89
View File
@@ -0,0 +1,89 @@
#![allow(missing_docs)]
use subxt_historic::{Error, OnlineClient, PolkadotConfig, ext::StreamExt};

// Example: fetch and iterate historic storage (System.Account balances) from a
// Polkadot archive node, demonstrating both dynamic (scale_value) and static
// (DecodeAsType) decoding.
#[tokio::main]
async fn main() -> Result<(), Error> {
    // Configuration for the Polkadot relay chain.
    let config = PolkadotConfig::new();
    // Create an online client for the Polkadot relay chain, pointed at a Polkadot archive node.
    let client = OnlineClient::from_url(config, "wss://rpc.polkadot.io").await?;
    // Iterate through some randomly selected blocks to show how to fetch and decode storage.
    // NOTE: this range is unbounded, so the example runs until it is interrupted or an error occurs.
    for block_number in 12345678.. {
        println!("=== Block {block_number} ===");
        // Point the client at a specific block number. By default this will download and cache
        // metadata for the required spec version (so it's cheaper to instantiate again), if it
        // hasn't already, and borrow the relevant legacy types from the client.
        let client_at_block = client.at(block_number).await?;
        // We'll work with the account balances at the given block, for this example.
        let account_balances = client_at_block
            .storage()
            .entry("System", "Account")?
            .into_map()?;
        // We can fetch a specific account balance by its key, like so (here I just picked a random key
        // I knew to exist from iterating over storage entries):
        let account_id_hex = "9a4d0faa2ba8c3cc5711852960940793acf55bf195b6eecf88fa78e961d0ce4a";
        let account_id = hex::decode(account_id_hex).unwrap();
        if let Some(entry) = account_balances.fetch((account_id,)).await? {
            // We can decode the value into our generic `scale_value::Value` type, which can
            // represent any SCALE-encoded value, like so:
            let _balance_info = entry.decode::<scale_value::Value>()?;
            // Or, if we know what shape to expect, we can decode the parts of the value that we care
            // about directly into a static type, which is more efficient and allows easy type-safe
            // access, like so:
            #[derive(scale_decode::DecodeAsType)]
            struct BalanceInfo {
                data: BalanceInfoData,
            }
            #[derive(scale_decode::DecodeAsType)]
            struct BalanceInfoData {
                free: u128,
                reserved: u128,
                misc_frozen: u128,
                fee_frozen: u128,
            }
            let balance_info = entry.decode::<BalanceInfo>()?;
            println!(
                " Single balance info from {account_id_hex} => free: {} reserved: {} misc_frozen: {} fee_frozen: {}",
                balance_info.data.free,
                balance_info.data.reserved,
                balance_info.data.misc_frozen,
                balance_info.data.fee_frozen,
            );
        }
        // Or we can iterate over all of the account balances and print them out, like so. Here we provide an
        // empty tuple, indicating that we want to iterate over everything and not only things under a certain key
        // (in the case of account balances, there is only one key anyway, but other storage entries may map from
        // several keys to a value, and for those we can choose which depth we iterate at by providing as many keys
        // as we want and leaving the rest). Here I only take the first 10 accounts I find for the sake of the example.
        let mut all_balances = account_balances.iter(()).await?.take(10);
        while let Some(entry) = all_balances.next().await {
            let entry = entry?;
            let key = entry.decode_key()?;
            // Decode the account ID from the key (we know here that we're working
            // with a map which has one key part, an account ID, so we just decode that part:
            let account_id = key
                .part(0)
                .unwrap()
                .decode::<[u8; 32]>()?
                .expect("We expect this key to decode into a 32 byte AccountId");
            let account_id_hex = hex::encode(account_id);
            // Decode these values into our generic scale_value::Value type. Less efficient than
            // defining a static type as above, but easier for the sake of the example.
            let balance_info = entry.decode_value::<scale_value::Value>()?;
            println!(" {account_id_hex} => {balance_info}");
        }
    }
    Ok(())
}
+47
View File
@@ -0,0 +1,47 @@
mod offline_client;
mod online_client;

use crate::config::Config;
use crate::extrinsics::ExtrinsicsClient;
use crate::storage::StorageClient;
use std::marker::PhantomData;

// We keep these traits internal, so that we can mess with them later if needed,
// and instead only the concrete types are public which wrap these trait impls.
pub(crate) use offline_client::OfflineClientAtBlockT;
pub(crate) use online_client::OnlineClientAtBlockT;

pub use offline_client::OfflineClient;
pub use online_client::OnlineClient;

/// This represents a client at a specific block number.
pub struct ClientAtBlock<Client, T> {
    // The underlying (offline or online) at-block client implementation.
    client: Client,
    // Carries the chain `Config` type without storing a value of it.
    marker: PhantomData<T>,
}

impl<Client, T> ClientAtBlock<Client, T> {
    /// Construct a new client at some block.
    pub(crate) fn new(client: Client) -> Self {
        Self {
            client,
            marker: PhantomData,
        }
    }
}

// These APIs only require offline capabilities of the inner client; online
// clients also satisfy this bound via their OfflineClientAtBlockT impl.
impl<'client, T, Client> ClientAtBlock<Client, T>
where
    T: Config + 'client,
    Client: OfflineClientAtBlockT<'client, T>,
{
    /// Work with extrinsics.
    pub fn extrinsics(&'_ self) -> ExtrinsicsClient<'_, Client, T> {
        ExtrinsicsClient::new(&self.client)
    }
    /// Work with storage.
    pub fn storage(&'_ self) -> StorageClient<'_, Client, T> {
        StorageClient::new(&self.client)
    }
}
+85
View File
@@ -0,0 +1,85 @@
use super::ClientAtBlock;
use crate::config::Config;
use crate::error::OfflineClientAtBlockError;
use frame_metadata::RuntimeMetadata;
use scale_info_legacy::TypeRegistrySet;
use std::sync::Arc;

/// A client which exposes the means to decode historic data on a chain offline.
#[derive(Clone, Debug)]
pub struct OfflineClient<T: Config> {
    /// The configuration for this client.
    // Arc so that cloning the client is cheap and clones share one config.
    config: Arc<T>,
}

impl<T: Config> OfflineClient<T> {
    /// Create a new [`OfflineClient`] with the given configuration.
    pub fn new(config: T) -> Self {
        OfflineClient {
            config: Arc::new(config),
        }
    }

    /// Pick the block height at which to operate. This references data from the
    /// [`OfflineClient`] it's called on, and so cannot outlive it.
    ///
    /// # Errors
    ///
    /// Errors if the configuration cannot supply a spec version for the given
    /// block number, or metadata for that spec version; an offline client has
    /// no node to fetch either from.
    pub fn at<'this>(
        &'this self,
        block_number: u64,
    ) -> Result<ClientAtBlock<OfflineClientAtBlock<'this, T>, T>, OfflineClientAtBlockError> {
        let config = &self.config;
        let spec_version = self
            .config
            .spec_version_for_block_number(block_number)
            .ok_or(OfflineClientAtBlockError::SpecVersionNotFound { block_number })?;
        let legacy_types = self.config.legacy_types_for_spec_version(spec_version);
        let metadata = self
            .config
            .metadata_for_spec_version(spec_version)
            .ok_or(OfflineClientAtBlockError::MetadataNotFound { spec_version })?;
        Ok(ClientAtBlock::new(OfflineClientAtBlock {
            config,
            legacy_types,
            metadata,
        }))
    }
}

/// This represents an offline-only client at a specific block.
#[doc(hidden)]
pub trait OfflineClientAtBlockT<'client, T: Config + 'client> {
    /// Get the configuration for this client.
    fn config(&self) -> &'client T;
    /// Get the legacy types that work at this block.
    fn legacy_types(&'_ self) -> &TypeRegistrySet<'client>;
    /// Get the metadata appropriate for this block.
    fn metadata(&self) -> &RuntimeMetadata;
}

// Dev note: this shouldn't need to be exposed unless there is some
// need to explicitly name the ClientAtBlock type. Rather keep it
// private to allow changes if possible.
#[doc(hidden)]
pub struct OfflineClientAtBlock<'client, T: Config + 'client> {
    /// The configuration for this chain.
    config: &'client T,
    /// Historic types to use at this block number.
    legacy_types: TypeRegistrySet<'client>,
    /// Metadata to use at this block number.
    metadata: Arc<RuntimeMetadata>,
}

impl<'client, T: Config + 'client> OfflineClientAtBlockT<'client, T>
    for OfflineClientAtBlock<'client, T>
{
    fn config(&self) -> &'client T {
        self.config
    }
    fn legacy_types(&self) -> &TypeRegistrySet<'client> {
        &self.legacy_types
    }
    fn metadata(&self) -> &RuntimeMetadata {
        &self.metadata
    }
}
+327
View File
@@ -0,0 +1,327 @@
use super::ClientAtBlock;
use crate::client::OfflineClientAtBlockT;
use crate::config::Config;
use crate::error::OnlineClientAtBlockError;
use codec::{Compact, Decode, Encode};
use frame_metadata::{RuntimeMetadata, RuntimeMetadataPrefixed};
use scale_info_legacy::TypeRegistrySet;
use std::sync::Arc;
use subxt_rpcs::methods::chain_head::ArchiveCallResult;
use subxt_rpcs::{ChainHeadRpcMethods, RpcClient};

#[cfg(feature = "jsonrpsee")]
#[cfg_attr(docsrs, doc(cfg(feature = "jsonrpsee")))]
use crate::error::OnlineClientError;

/// A client which exposes the means to decode historic data on a chain online.
#[derive(Clone, Debug)]
pub struct OnlineClient<T: Config> {
    // Arc so that cloning the client is cheap; clones share one config and RPC connection.
    inner: Arc<OnlineClientInner<T>>,
}

#[derive(Debug)]
struct OnlineClientInner<T: Config> {
    /// The configuration for this client.
    config: T,
    /// The RPC methods used to communicate with the node.
    rpc_methods: ChainHeadRpcMethods<T>,
}

// The default constructors assume Jsonrpsee.
#[cfg(feature = "jsonrpsee")]
#[cfg_attr(docsrs, doc(cfg(feature = "jsonrpsee")))]
impl<T: Config> OnlineClient<T> {
    /// Construct a new [`OnlineClient`] using default settings which
    /// point to a locally running node on `ws://127.0.0.1:9944`.
    ///
    /// **Note:** This will only work if the local node is an archive node.
    pub async fn new(config: T) -> Result<OnlineClient<T>, OnlineClientError> {
        let url = "ws://127.0.0.1:9944";
        OnlineClient::from_url(config, url).await
    }

    /// Construct a new [`OnlineClient`], providing a URL to connect to.
    ///
    /// # Errors
    ///
    /// Errors if the URL cannot be parsed, or if it is insecure (neither
    /// https/wss nor localhost); use [`OnlineClient::from_insecure_url`]
    /// to skip the security check.
    pub async fn from_url(
        config: T,
        url: impl AsRef<str>,
    ) -> Result<OnlineClient<T>, OnlineClientError> {
        let url_str = url.as_ref();
        let url = url::Url::parse(url_str).map_err(|_| OnlineClientError::InvalidUrl {
            url: url_str.to_string(),
        })?;
        if !Self::is_url_secure(&url) {
            return Err(OnlineClientError::RpcClientError(
                subxt_rpcs::Error::InsecureUrl(url_str.to_string()),
            ));
        }
        OnlineClient::from_insecure_url(config, url).await
    }

    /// Construct a new [`OnlineClient`], providing a URL to connect to.
    ///
    /// Allows insecure URLs without SSL encryption, e.g. (http:// and ws:// URLs).
    pub async fn from_insecure_url(
        config: T,
        url: impl AsRef<str>,
    ) -> Result<OnlineClient<T>, OnlineClientError> {
        let rpc_client = RpcClient::from_insecure_url(url).await?;
        Ok(OnlineClient::from_rpc_client(config, rpc_client))
    }

    // A URL is considered secure if it uses a TLS scheme (https/wss) or
    // points at loopback (localhost / 127.0.0.1 / ::1).
    fn is_url_secure(url: &url::Url) -> bool {
        let secure_scheme = url.scheme() == "https" || url.scheme() == "wss";
        let is_localhost = url.host().is_some_and(|e| match e {
            url::Host::Domain(e) => e == "localhost",
            url::Host::Ipv4(e) => e.is_loopback(),
            url::Host::Ipv6(e) => e.is_loopback(),
        });
        secure_scheme || is_localhost
    }
}

impl<T: Config> OnlineClient<T> {
    /// Construct a new [`OnlineClient`] by providing an [`RpcClient`] to drive the connection,
    /// and some configuration for the chain we're connecting to.
    pub fn from_rpc_client(config: T, rpc_client: impl Into<RpcClient>) -> OnlineClient<T> {
        let rpc_client = rpc_client.into();
        let rpc_methods = ChainHeadRpcMethods::new(rpc_client);
        OnlineClient {
            inner: Arc::new(OnlineClientInner {
                config,
                rpc_methods,
            }),
        }
    }

    /// Pick the block height at which to operate. This references data from the
    /// [`OnlineClient`] it's called on, and so cannot outlive it.
    ///
    /// # Errors
    ///
    /// Errors if the block hash for the height cannot be obtained, or if the
    /// spec version / metadata can neither be found in the configuration nor
    /// fetched from the node.
    pub async fn at(
        &'_ self,
        block_number: u64,
    ) -> Result<ClientAtBlock<OnlineClientAtBlock<'_, T>, T>, OnlineClientAtBlockError> {
        let config = &self.inner.config;
        let rpc_methods = &self.inner.rpc_methods;
        // NOTE(review): if several blocks exist at this height (forks), `.pop()` takes
        // whichever hash the node returned last — confirm this is the canonical block.
        let block_hash = rpc_methods
            .archive_v1_hash_by_height(block_number as usize)
            .await
            .map_err(|e| OnlineClientAtBlockError::CannotGetBlockHash {
                block_number,
                reason: e,
            })?
            .pop()
            .ok_or_else(|| OnlineClientAtBlockError::BlockNotFound { block_number })?
            .into();
        // Get our configuration, or fetch from the node if not available.
        let spec_version =
            if let Some(spec_version) = config.spec_version_for_block_number(block_number) {
                spec_version
            } else {
                // Fetch spec version. Caching this doesn't really make sense, so either
                // details are provided offline or we fetch them every time.
                get_spec_version(rpc_methods, block_hash).await?
            };
        let metadata = if let Some(metadata) = config.metadata_for_spec_version(spec_version) {
            metadata
        } else {
            // Fetch and then give our config the opportunity to cache this metadata.
            let metadata = get_metadata(rpc_methods, block_hash).await?;
            let metadata = Arc::new(metadata);
            config.set_metadata_for_spec_version(spec_version, metadata.clone());
            metadata
        };
        let historic_types = config.legacy_types_for_spec_version(spec_version);
        Ok(ClientAtBlock::new(OnlineClientAtBlock {
            config,
            historic_types,
            metadata,
            rpc_methods,
            block_hash,
        }))
    }
}

/// This represents an online client at a specific block.
#[doc(hidden)]
pub trait OnlineClientAtBlockT<'client, T: Config + 'client>:
    OfflineClientAtBlockT<'client, T>
{
    /// Return the RPC methods we'll use to interact with the node.
    fn rpc_methods(&self) -> &ChainHeadRpcMethods<T>;
    /// Return the block hash for the current block.
    fn block_hash(&self) -> <T as Config>::Hash;
}

// Dev note: this shouldn't need to be exposed unless there is some
// need to explicitly name the ClientAtBlock type. Rather keep it
// private to allow changes if possible.
#[doc(hidden)]
pub struct OnlineClientAtBlock<'client, T: Config + 'client> {
    /// The configuration for this chain.
    config: &'client T,
    /// Historic types to use at this block number.
    historic_types: TypeRegistrySet<'client>,
    /// Metadata to use at this block number.
    metadata: Arc<RuntimeMetadata>,
    /// We also need RPC methods for online interactions.
    rpc_methods: &'client ChainHeadRpcMethods<T>,
    /// The block hash at which this client is operating.
    block_hash: <T as Config>::Hash,
}

impl<'client, T: Config + 'client> OnlineClientAtBlockT<'client, T>
    for OnlineClientAtBlock<'client, T>
{
    fn rpc_methods(&self) -> &ChainHeadRpcMethods<T> {
        self.rpc_methods
    }
    fn block_hash(&self) -> <T as Config>::Hash {
        self.block_hash
    }
}

impl<'client, T: Config + 'client> OfflineClientAtBlockT<'client, T>
    for OnlineClientAtBlock<'client, T>
{
    fn config(&self) -> &'client T {
        self.config
    }
    fn legacy_types(&'_ self) -> &TypeRegistrySet<'client> {
        &self.historic_types
    }
    fn metadata(&self) -> &RuntimeMetadata {
        &self.metadata
    }
}

// Determine the runtime spec version at the given block by calling the
// `Core_version` runtime API and decoding just the fields we need.
async fn get_spec_version<T: Config>(
    rpc_methods: &ChainHeadRpcMethods<T>,
    block_hash: <T as Config>::Hash,
) -> Result<u32, OnlineClientAtBlockError> {
    use codec::Decode;
    use subxt_rpcs::methods::chain_head::ArchiveCallResult;
    // make a runtime call to get the version information. This is also a constant
    // in the metadata and so we could fetch it from there to avoid the call, but it would be a
    // bit more effort.
    let spec_version_bytes = {
        let call_res = rpc_methods
            .archive_v1_call(block_hash.into(), "Core_version", &[])
            .await
            .map_err(|e| OnlineClientAtBlockError::CannotGetSpecVersion {
                block_hash: block_hash.to_string(),
                reason: format!("Error calling Core_version: {e}"),
            })?;
        match call_res {
            ArchiveCallResult::Success(bytes) => bytes.0,
            ArchiveCallResult::Error(e) => {
                return Err(OnlineClientAtBlockError::CannotGetSpecVersion {
                    block_hash: block_hash.to_string(),
                    reason: format!("Core_version returned an error: {e}"),
                });
            }
        }
    };
    // We only care about the spec version, so just decode enough of this version information
    // to be able to pluck out what we want, and ignore the rest.
    let spec_version = {
        // Leading fields of a SCALE-encoded RuntimeVersion, in declaration order;
        // decoding stops after `spec_version`, so trailing fields are ignored.
        #[derive(codec::Decode)]
        struct SpecVersionHeader {
            _spec_name: String,
            _impl_name: String,
            _authoring_version: u32,
            spec_version: u32,
        }
        SpecVersionHeader::decode(&mut &spec_version_bytes[..])
            .map_err(|e| OnlineClientAtBlockError::CannotGetSpecVersion {
                block_hash: block_hash.to_string(),
                reason: format!("Error decoding Core_version response: {e}"),
            })?
            .spec_version
    };
    Ok(spec_version)
}

// Fetch the runtime metadata at the given block, preferring the modern
// `Metadata_metadata_at_version` API and falling back to `Metadata_metadata`
// for older runtimes.
async fn get_metadata<T: Config>(
    rpc_methods: &ChainHeadRpcMethods<T>,
    block_hash: <T as Config>::Hash,
) -> Result<RuntimeMetadata, OnlineClientAtBlockError> {
    // First, try to use the "modern" metadata APIs to get the most recent version we can.
    // Any failure here (older runtime without the API, undecodable response) just means
    // we fall back to the legacy API below, hence the `.ok()`s.
    let version_to_get = rpc_methods
        .archive_v1_call(block_hash.into(), "Metadata_metadata_versions", &[])
        .await
        .ok()
        .and_then(|res| res.as_success())
        .and_then(|res| <Vec<u32>>::decode(&mut &res[..]).ok())
        .and_then(|versions| {
            // We want to filter out the "unstable" version, which is represented by u32::MAX.
            versions.into_iter().filter(|v| *v != u32::MAX).max()
        });
    // We had success calling the above API, so we expect the "modern" metadata API to work.
    if let Some(version_to_get) = version_to_get {
        let version_bytes = version_to_get.encode();
        let rpc_response = rpc_methods
            .archive_v1_call(
                block_hash.into(),
                "Metadata_metadata_at_version",
                &version_bytes,
            )
            .await
            .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata {
                block_hash: block_hash.to_string(),
                reason: format!("Error calling Metadata_metadata_at_version: {e}"),
            })
            .and_then(|res| match res {
                ArchiveCallResult::Success(bytes) => Ok(bytes.0),
                ArchiveCallResult::Error(e) => Err(OnlineClientAtBlockError::CannotGetMetadata {
                    block_hash: block_hash.to_string(),
                    reason: format!("Calling Metadata_metadata_at_version returned an error: {e}"),
                }),
            })?;
        // Option because we may have asked for a version that doesn't exist. Compact because we get back a Vec<u8>
        // of the metadata bytes, and the Vec is preceded by its compact encoded length. The actual bytes are then
        // decoded as a `RuntimeMetadataPrefixed`, after this.
        let (_, metadata) = <Option<(Compact<u32>, RuntimeMetadataPrefixed)>>::decode(&mut &rpc_response[..])
            .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata {
                block_hash: block_hash.to_string(),
                reason: format!("Error decoding response for Metadata_metadata_at_version: {e}"),
            })?
            .ok_or_else(|| OnlineClientAtBlockError::CannotGetMetadata {
                block_hash: block_hash.to_string(),
                reason: format!("No metadata returned for the latest version from Metadata_metadata_versions ({version_to_get})"),
            })?;
        // `.1` is the RuntimeMetadata inside the RuntimeMetadataPrefixed tuple.
        return Ok(metadata.1);
    }
    // We didn't get a version from Metadata_metadata_versions, so fall back to the "old" API.
    let metadata_bytes = rpc_methods
        .archive_v1_call(block_hash.into(), "Metadata_metadata", &[])
        .await
        .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata {
            block_hash: block_hash.to_string(),
            reason: format!("Error calling Metadata_metadata: {e}"),
        })
        .and_then(|res| match res {
            ArchiveCallResult::Success(bytes) => Ok(bytes.0),
            ArchiveCallResult::Error(e) => Err(OnlineClientAtBlockError::CannotGetMetadata {
                block_hash: block_hash.to_string(),
                reason: format!("Calling Metadata_metadata returned an error: {e}"),
            }),
        })?;
    let (_, metadata) = <(Compact<u32>, RuntimeMetadataPrefixed)>::decode(&mut &metadata_bytes[..])
        .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata {
            block_hash: block_hash.to_string(),
            reason: format!("Error decoding response for Metadata_metadata: {e}"),
        })?;
    Ok(metadata.1)
}
+56
View File
@@ -0,0 +1,56 @@
pub mod polkadot;
pub mod substrate;

use scale_info_legacy::TypeRegistrySet;
use std::fmt::Display;
use std::sync::Arc;
use subxt_rpcs::RpcConfig;

pub use polkadot::PolkadotConfig;
pub use substrate::SubstrateConfig;

/// This represents the configuration needed for a specific chain. This includes
/// any hardcoded types we need to know about for that chain, as well as a means to
/// obtain historic types for that chain.
pub trait Config: RpcConfig {
    /// The type of hashing used by the runtime.
    // Must convert to/from the RPC-layer hash type so hashes can cross between
    // this crate's Config and the subxt-rpcs RpcConfig.
    type Hash: Clone
        + Copy
        + Display
        + Into<<Self as RpcConfig>::Hash>
        + From<<Self as RpcConfig>::Hash>;

    /// Return the spec version for a given block number, if available.
    ///
    /// The [`crate::client::OnlineClient`] will look this up on chain if it's not available here,
    /// but the [`crate::client::OfflineClient`] will error if this is not available for the required block number.
    fn spec_version_for_block_number(&self, block_number: u64) -> Option<u32>;

    /// Return the metadata for a given spec version, if available.
    ///
    /// The [`crate::client::OnlineClient`] will look this up on chain if it's not available here, and then
    /// call [`Config::set_metadata_for_spec_version`] to give the configuration the opportunity to cache it.
    /// The [`crate::client::OfflineClient`] will error if this is not available for the required spec version.
    fn metadata_for_spec_version(
        &self,
        spec_version: u32,
    ) -> Option<Arc<frame_metadata::RuntimeMetadata>>;

    /// Set some metadata for a given spec version. The [`crate::client::OnlineClient`] will call this if it has
    /// to retrieve metadata from the chain, to give this the opportunity to cache it. The configuration can
    /// do nothing if it prefers.
    fn set_metadata_for_spec_version(
        &self,
        spec_version: u32,
        metadata: Arc<frame_metadata::RuntimeMetadata>,
    );

    /// Return legacy types (ie types to use with Runtimes that return pre-V14 metadata) for a given spec version.
    fn legacy_types_for_spec_version<'this>(
        &'this self,
        spec_version: u32,
    ) -> TypeRegistrySet<'this>;

    /// Hash some bytes, for instance a block header or extrinsic, for this chain.
    fn hash(s: &[u8]) -> <Self as Config>::Hash;
}
+88
View File
@@ -0,0 +1,88 @@
use super::Config;
use super::SubstrateConfig;
use scale_info_legacy::{ChainTypeRegistry, TypeRegistrySet};
use std::sync::Arc;
/// Configuration that's suitable for the Polkadot Relay Chain.
///
/// This is a newtype around [`SubstrateConfig`], constructed with the Polkadot
/// relay chain's legacy (pre-V14 metadata) type definitions pre-loaded.
pub struct PolkadotConfig(SubstrateConfig);
impl PolkadotConfig {
    /// Create a new PolkadotConfig.
    ///
    /// This starts from [`SubstrateConfig::new`] and adds the Polkadot relay
    /// chain's legacy type definitions, so pre-V14-metadata blocks can be decoded.
    pub fn new() -> Self {
        let config = SubstrateConfig::new()
            .set_legacy_types(frame_decode::legacy_types::polkadot::relay_chain());
        // TODO: Set spec versions as well with known spec version changes, to speed
        // up accessing historic blocks within the known ranges. For now, we just let
        // the online client look these up on chain.
        Self(config)
    }
    /// Set the metadata to be used for decoding blocks at the given spec versions.
    ///
    /// Accepts anything iterable over `(spec_version, metadata)` pairs.
    pub fn set_metadata_for_spec_versions(
        self,
        ranges: impl IntoIterator<Item = (u32, frame_metadata::RuntimeMetadata)>,
    ) -> Self {
        // Delegate to the inner SubstrateConfig, re-wrapping the result.
        Self(self.0.set_metadata_for_spec_versions(ranges.into_iter()))
    }
    /// Given an iterator of block ranges to spec version of the form `(start, end, spec_version)`, add them
    /// to this configuration.
    pub fn set_spec_version_for_block_ranges(
        self,
        ranges: impl IntoIterator<Item = (u64, u64, u32)>,
    ) -> Self {
        Self(self.0.set_spec_version_for_block_ranges(ranges.into_iter()))
    }
}
/// This hands back the legacy types for the Polkadot Relay Chain, which is what [`PolkadotConfig`] uses internally.
///
/// Exposed so that users building a custom configuration can reuse these types.
pub fn legacy_types() -> ChainTypeRegistry {
    frame_decode::legacy_types::polkadot::relay_chain()
}
impl Default for PolkadotConfig {
fn default() -> Self {
Self::new()
}
}
// Every method here simply delegates to the wrapped `SubstrateConfig`.
impl Config for PolkadotConfig {
    type Hash = <SubstrateConfig as Config>::Hash;
    fn legacy_types_for_spec_version(&'_ self, spec_version: u32) -> TypeRegistrySet<'_> {
        self.0.legacy_types_for_spec_version(spec_version)
    }
    fn spec_version_for_block_number(&self, block_number: u64) -> Option<u32> {
        self.0.spec_version_for_block_number(block_number)
    }
    fn metadata_for_spec_version(
        &self,
        spec_version: u32,
    ) -> Option<Arc<frame_metadata::RuntimeMetadata>> {
        self.0.metadata_for_spec_version(spec_version)
    }
    fn set_metadata_for_spec_version(
        &self,
        spec_version: u32,
        metadata: Arc<frame_metadata::RuntimeMetadata>,
    ) {
        self.0.set_metadata_for_spec_version(spec_version, metadata)
    }
    fn hash(s: &[u8]) -> <Self as Config>::Hash {
        SubstrateConfig::hash(s)
    }
}
// The RPC-level configuration also mirrors `SubstrateConfig`'s.
impl subxt_rpcs::RpcConfig for PolkadotConfig {
    type Hash = <SubstrateConfig as subxt_rpcs::RpcConfig>::Hash;
    type Header = <SubstrateConfig as subxt_rpcs::RpcConfig>::Header;
    type AccountId = <SubstrateConfig as subxt_rpcs::RpcConfig>::AccountId;
}
+129
View File
@@ -0,0 +1,129 @@
use super::Config;
use crate::utils::RangeMap;
use primitive_types::H256;
use scale_info_legacy::{ChainTypeRegistry, TypeRegistrySet};
use std::collections::HashMap;
use std::sync::Arc;
use std::sync::Mutex;
/// Configuration that's suitable for standard Substrate chains (ie those
/// that have not customized the block hash type).
pub struct SubstrateConfig {
    // Legacy type definitions used to decode blocks produced by Runtimes
    // emitting pre-V14 metadata.
    legacy_types: ChainTypeRegistry,
    // Maps block-number ranges to the spec version in force over each range.
    spec_version_for_block_number: RangeMap<u64, u32>,
    // Per-spec-version metadata cache. A Mutex is used so that the cache can be
    // updated through `&self` (see `Config::set_metadata_for_spec_version`).
    metadata_for_spec_version: Mutex<HashMap<u32, Arc<frame_metadata::RuntimeMetadata>>>,
}
impl SubstrateConfig {
    /// Create a new SubstrateConfig with no legacy types.
    ///
    /// Without any further configuration, this will only work with
    /// the [`crate::client::OnlineClient`] for blocks that were produced by Runtimes
    /// that emit metadata V14 or later.
    ///
    /// To support working at any block with the [`crate::client::OnlineClient`], you
    /// must call [`SubstrateConfig::set_legacy_types`] with appropriate legacy type
    /// definitions.
    ///
    /// To support working with the [`crate::client::OfflineClient`] at any block,
    /// you must also call:
    /// - [`SubstrateConfig::set_metadata_for_spec_versions`] to set the metadata to
    ///   use at each spec version we might encounter.
    /// - [`SubstrateConfig::set_spec_version_for_block_ranges`] to set the spec version
    ///   to use for each range of blocks we might encounter.
    pub fn new() -> Self {
        Self {
            legacy_types: ChainTypeRegistry::empty(),
            spec_version_for_block_number: RangeMap::empty(),
            metadata_for_spec_version: Mutex::new(HashMap::new()),
        }
    }
    /// Set the legacy types to use for this configuration. This enables support for
    /// blocks produced by Runtimes that emit metadata older than V14.
    pub fn set_legacy_types(mut self, legacy_types: ChainTypeRegistry) -> Self {
        self.legacy_types = legacy_types;
        self
    }
    /// Set the metadata to be used for decoding blocks at the given spec versions.
    ///
    /// Entries are added to any metadata already cached; an entry for a spec version
    /// that is already present replaces the previous one.
    pub fn set_metadata_for_spec_versions(
        self,
        ranges: impl IntoIterator<Item = (u32, frame_metadata::RuntimeMetadata)>,
    ) -> Self {
        {
            // Scope the lock guard so that `self` is no longer borrowed when
            // it is returned below.
            let mut map = self.metadata_for_spec_version.lock().unwrap();
            for (spec_version, metadata) in ranges {
                map.insert(spec_version, Arc::new(metadata));
            }
        }
        self
    }
    /// Given an iterator of block ranges to spec version of the form `(start, end, spec_version)`, add them
    /// to this configuration.
    ///
    /// # Note
    ///
    /// This replaces any ranges set by a previous call, rather than merging with them,
    /// so pass all of the ranges you care about in a single call.
    pub fn set_spec_version_for_block_ranges(
        mut self,
        ranges: impl IntoIterator<Item = (u64, u64, u32)>,
    ) -> Self {
        let mut builder = RangeMap::builder();
        for (start, end, spec_version) in ranges {
            builder = builder.add_range(start, end, spec_version);
        }
        self.spec_version_for_block_number = builder.build();
        self
    }
}
impl Default for SubstrateConfig {
fn default() -> Self {
Self::new()
}
}
impl Config for SubstrateConfig {
    type Hash = H256;
    fn legacy_types_for_spec_version(&'_ self, spec_version: u32) -> TypeRegistrySet<'_> {
        // The legacy type registry keys spec versions as u64, so widen here.
        self.legacy_types.for_spec_version(spec_version as u64)
    }
    fn spec_version_for_block_number(&self, block_number: u64) -> Option<u32> {
        self.spec_version_for_block_number
            .get(block_number)
            .copied()
    }
    fn metadata_for_spec_version(
        &self,
        spec_version: u32,
    ) -> Option<Arc<frame_metadata::RuntimeMetadata>> {
        // Hand back a cheap Arc clone of any cached metadata. The `unwrap`
        // only panics if the lock was poisoned by a panicking thread.
        self.metadata_for_spec_version
            .lock()
            .unwrap()
            .get(&spec_version)
            .cloned()
    }
    fn set_metadata_for_spec_version(
        &self,
        spec_version: u32,
        metadata: Arc<frame_metadata::RuntimeMetadata>,
    ) {
        self.metadata_for_spec_version
            .lock()
            .unwrap()
            .insert(spec_version, metadata);
    }
    fn hash(s: &[u8]) -> <Self as Config>::Hash {
        // Standard Substrate chains use blake2_256 for headers/extrinsics.
        sp_crypto_hashing::blake2_256(s).into()
    }
}
impl subxt_rpcs::RpcConfig for SubstrateConfig {
    type Hash = <Self as Config>::Hash;
    // We don't use these types in any of the RPC methods we call,
    // so don't bother setting them up:
    type Header = ();
    type AccountId = ();
}
+309
View File
@@ -0,0 +1,309 @@
/// Any error emitted by this crate can convert into this.
// Dev Note: All errors here are transparent, because in many places
// the inner errors are returned and so need to provide enough context
// as-is, so there shouldn't be anything to add here.
// `#[non_exhaustive]` leaves room to add further error categories without
// breaking downstream matches.
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum Error {
    #[error(transparent)]
    OnlineClientError(#[from] OnlineClientError),
    #[error(transparent)]
    OfflineClientAtBlockError(#[from] OfflineClientAtBlockError),
    #[error(transparent)]
    OnlineClientAtBlockError(#[from] OnlineClientAtBlockError),
    #[error(transparent)]
    ExtrinsicsError(#[from] ExtrinsicsError),
    #[error(transparent)]
    ExtrinsicTransactionExtensionError(#[from] ExtrinsicTransactionExtensionError),
    #[error(transparent)]
    ExtrinsicCallError(#[from] ExtrinsicCallError),
    #[error(transparent)]
    StorageError(#[from] StorageError),
    #[error(transparent)]
    StorageEntryIsNotAMap(#[from] StorageEntryIsNotAMap),
    #[error(transparent)]
    StorageEntryIsNotAPlainValue(#[from] StorageEntryIsNotAPlainValue),
    #[error(transparent)]
    StorageKeyError(#[from] StorageKeyError),
    #[error(transparent)]
    StorageValueError(#[from] StorageValueError),
}
/// Errors constructing an online client.
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum OnlineClientError {
    #[error("Cannot construct OnlineClient: The URL provided is invalid: {url}")]
    InvalidUrl {
        /// The URL that was invalid.
        url: String,
    },
    #[error("Cannot construct OnlineClient owing to an RPC client error: {0}")]
    RpcClientError(#[from] subxt_rpcs::Error),
}
/// Errors constructing an offline client at a specific block number.
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum OfflineClientAtBlockError {
    #[error(
        "Cannot construct OfflineClientAtBlock: spec version not found for block number {block_number}"
    )]
    SpecVersionNotFound {
        /// The block number for which the spec version was not found.
        block_number: u64,
    },
    #[error(
        "Cannot construct OfflineClientAtBlock: metadata not found for spec version {spec_version}"
    )]
    MetadataNotFound {
        /// The spec version for which the metadata was not found.
        spec_version: u32,
    },
}
/// Errors constructing an online client at a specific block number.
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum OnlineClientAtBlockError {
    #[error(
        "Cannot construct OnlineClientAtBlock: failed to get block hash from node for block {block_number}: {reason}"
    )]
    CannotGetBlockHash {
        /// Block number we failed to get the hash for.
        block_number: u64,
        /// The error we encountered.
        reason: subxt_rpcs::Error,
    },
    #[error("Cannot construct OnlineClientAtBlock: block number {block_number} not found")]
    BlockNotFound {
        /// The block number for which a block was not found.
        block_number: u64,
    },
    #[error(
        "Cannot construct OnlineClientAtBlock: failed to get spec version for block hash {block_hash}: {reason}"
    )]
    CannotGetSpecVersion {
        /// The block hash (as a string) for which we failed to get the spec version.
        block_hash: String,
        /// The error we encountered, stringified.
        reason: String,
    },
    #[error(
        "Cannot construct OnlineClientAtBlock: failed to get metadata for block hash {block_hash}: {reason}"
    )]
    CannotGetMetadata {
        /// The block hash (as a string) for which we failed to get the metadata.
        block_hash: String,
        /// The error we encountered, stringified.
        reason: String,
    },
}
/// Errors working with extrinsics.
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum ExtrinsicsError {
    #[error("Could not fetch extrinsics: {reason}")]
    FetchError {
        /// The error that occurred while fetching the extrinsics.
        reason: subxt_rpcs::Error,
    },
    #[error("Could not decode extrinsic at index {index}: {reason}")]
    DecodeError {
        /// The extrinsic index that failed to decode.
        index: usize,
        /// The error that occurred during decoding.
        reason: frame_decode::extrinsics::ExtrinsicDecodeError,
    },
    #[error(
        "Could not decode extrinsic at index {index}: there were undecoded bytes at the end, which implies that we did not decode it properly"
    )]
    LeftoverBytes {
        /// The extrinsic index that had leftover bytes.
        index: usize,
        /// The bytes that were left over after decoding the extrinsic.
        leftover_bytes: Vec<u8>,
    },
    #[error("Could not decode extrinsics: Unsupported metadata version ({version})")]
    UnsupportedMetadataVersion {
        /// The metadata version that is not supported.
        version: u32,
    },
}
/// Errors decoding the transaction extensions of an extrinsic.
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum ExtrinsicTransactionExtensionError {
    #[error("Could not decode extrinsic transaction extensions: {reason}")]
    AllDecodeError {
        /// The error that occurred while decoding the transaction extensions.
        reason: scale_decode::Error,
    },
    #[error(
        "Could not decode extrinsic transaction extensions: there were undecoded bytes at the end, which implies that we did not decode it properly"
    )]
    AllLeftoverBytes {
        /// The bytes that were left over after decoding the transaction extensions.
        leftover_bytes: Vec<u8>,
    },
    #[error("Could not decode extrinsic transaction extension {name}: {reason}")]
    DecodeError {
        /// The name of the transaction extension that failed to decode.
        name: String,
        /// The error that occurred during decoding.
        reason: scale_decode::Error,
    },
    #[error(
        "Could not decode extrinsic transaction extension {name}: there were undecoded bytes at the end, which implies that we did not decode it properly"
    )]
    LeftoverBytes {
        /// The name of the transaction extension that had leftover bytes.
        name: String,
        /// The bytes that were left over after decoding the transaction extension.
        leftover_bytes: Vec<u8>,
    },
}
/// Errors decoding the call data of an extrinsic.
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum ExtrinsicCallError {
    #[error("Could not decode the fields in extrinsic call: {reason}")]
    FieldsDecodeError {
        /// The error that occurred while decoding the fields of the extrinsic call.
        reason: scale_decode::Error,
    },
    #[error(
        "Could not decode the fields in extrinsic call: there were undecoded bytes at the end, which implies that we did not decode it properly"
    )]
    FieldsLeftoverBytes {
        /// The bytes that were left over after decoding the extrinsic call.
        leftover_bytes: Vec<u8>,
    },
    #[error("Could not decode field {name} in extrinsic call: {reason}")]
    FieldDecodeError {
        /// The name of the field that failed to decode.
        name: String,
        /// The error that occurred during decoding.
        reason: scale_decode::Error,
    },
    #[error(
        "Could not decode field {name} in extrinsic call: there were undecoded bytes at the end, which implies that we did not decode it properly"
    )]
    FieldLeftoverBytes {
        /// The name of the field that had leftover bytes.
        name: String,
        /// The bytes that were left over after decoding the extrinsic call.
        leftover_bytes: Vec<u8>,
    },
}
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[error("Storage entry is not a map: pallet {pallet_name}, storage {storage_name}")]
pub struct StorageEntryIsNotAMap {
    /// The pallet containing the storage entry that is not a map.
    pub pallet_name: String,
    /// The storage entry that is not a map.
    pub storage_name: String,
}
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[error("Storage entry is not a plain value: pallet {pallet_name}, storage {storage_name}")]
pub struct StorageEntryIsNotAPlainValue {
    /// The pallet containing the storage entry that is not a plain value.
    pub pallet_name: String,
    /// The storage entry that is not a plain value.
    pub storage_name: String,
}
/// Errors working with the storage APIs.
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum StorageError {
    #[error("RPC error interacting with storage APIs: {reason}")]
    RpcError {
        /// The error that occurred while fetching the storage entry.
        reason: subxt_rpcs::Error,
    },
    #[error("Could not fetch next entry from storage subscription: {reason}")]
    StorageEventError {
        /// The error that occurred while fetching the next storage entry, stringified.
        reason: String,
    },
    #[error("Could not construct storage key: {reason}")]
    KeyEncodeError {
        /// The error that occurred while constructing the storage key.
        reason: frame_decode::storage::StorageKeyEncodeError,
    },
    #[error(
        "Too many keys provided: expected {num_keys_expected} keys, but got {num_keys_provided}"
    )]
    WrongNumberOfKeysProvided {
        /// The number of keys that were provided.
        num_keys_provided: usize,
        /// The number of keys expected.
        num_keys_expected: usize,
    },
    #[error(
        "Could not extract storage information from metadata: Unsupported metadata version ({version})"
    )]
    UnsupportedMetadataVersion {
        /// The metadata version that is not supported.
        version: u32,
    },
    #[error("Could not extract storage information from metadata: {reason}")]
    ExtractStorageInfoError {
        /// The error that occurred while extracting storage information from the metadata.
        reason: frame_decode::storage::StorageInfoError<'static>,
    },
}
/// Errors decoding a storage key.
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum StorageKeyError {
    #[error("Could not decode the storage key: {reason}")]
    DecodeError {
        /// The error that occurred while decoding the storage key information.
        reason: frame_decode::storage::StorageKeyDecodeError<String>,
    },
    #[error(
        "Could not decode the storage key: there were undecoded bytes at the end, which implies that we did not decode it properly"
    )]
    LeftoverBytes {
        /// The bytes that were left over after decoding the storage key.
        leftover_bytes: Vec<u8>,
    },
    #[error("Could not decode the part of the storage key at index {index}: {reason}")]
    DecodePartError {
        /// The index of the part of the storage key that failed to decode.
        index: usize,
        /// The error that occurred during decoding.
        reason: scale_decode::Error,
    },
}
/// Errors decoding a storage value.
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum StorageValueError {
    #[error("Could not decode storage value: {reason}")]
    DecodeError {
        /// The error that occurred while decoding the storage value.
        reason: scale_decode::Error,
    },
    #[error(
        "Could not decode storage value: there were undecoded bytes at the end, which implies that we did not decode it properly"
    )]
    LeftoverBytes {
        /// The bytes that were left over after decoding the storage value.
        leftover_bytes: Vec<u8>,
    },
}
+76
View File
@@ -0,0 +1,76 @@
use crate::client::{OfflineClientAtBlockT, OnlineClientAtBlockT};
use crate::config::Config;
use crate::error::ExtrinsicsError;
mod extrinsic_call;
mod extrinsic_info;
mod extrinsic_transaction_extensions;
mod extrinsics_type;
pub use extrinsic_transaction_extensions::ExtrinsicTransactionExtensions;
pub use extrinsics_type::{Extrinsic, Extrinsics};
/// Work with extrinsics.
pub struct ExtrinsicsClient<'atblock, Client, T> {
    // The block-scoped client to fetch/decode extrinsics through.
    client: &'atblock Client,
    // Carries the chain `Config` type parameter without storing a value of it.
    marker: std::marker::PhantomData<T>,
}
impl<'atblock, Client, T> ExtrinsicsClient<'atblock, Client, T> {
    /// Construct an [`ExtrinsicsClient`] for the given block-scoped client.
    pub(crate) fn new(client: &'atblock Client) -> Self {
        Self {
            client,
            marker: std::marker::PhantomData,
        }
    }
}
// Things that we can do online with extrinsics.
impl<'atblock, 'client: 'atblock, Client, T> ExtrinsicsClient<'atblock, Client, T>
where
    T: Config + 'client,
    Client: OnlineClientAtBlockT<'client, T>,
{
    /// Fetch the extrinsics for the current block. This is essentially a
    /// combination of [`Self::fetch_bytes`] and [`Self::decode_from`].
    pub async fn fetch(&self) -> Result<Extrinsics<'atblock>, ExtrinsicsError> {
        let encoded: Vec<Vec<u8>> = self.fetch_bytes().await?;
        // Small optimization; skip decoding entirely if there are no bytes.
        if encoded.is_empty() {
            Ok(Extrinsics::empty())
        } else {
            self.decode_from(encoded)
        }
    }
    /// Fetch the bytes for the extrinsics in the current block.
    pub async fn fetch_bytes(&self) -> Result<Vec<Vec<u8>>, ExtrinsicsError> {
        let block_hash = self.client.block_hash().into();
        let maybe_body = self
            .client
            .rpc_methods()
            .archive_v1_body(block_hash)
            .await
            .map_err(|reason| ExtrinsicsError::FetchError { reason })?;
        // A missing body (eg unknown block) is treated as "no extrinsics".
        let bytes = match maybe_body {
            Some(body) => body.into_iter().map(|ext| ext.0).collect(),
            None => Vec::new(),
        };
        Ok(bytes)
    }
}
// Things that we can do offline with extrinsics.
impl<'atblock, 'client: 'atblock, Client, T> ExtrinsicsClient<'atblock, Client, T>
where
    T: Config + 'client,
    Client: OfflineClientAtBlockT<'client, T>,
{
    /// Given some bytes representing the extrinsics in this block, decode them into an [`Extrinsics`] type.
    ///
    /// Each inner `Vec<u8>` is the encoded bytes of one extrinsic.
    pub fn decode_from(
        &self,
        bytes: Vec<Vec<u8>>,
    ) -> Result<Extrinsics<'atblock>, ExtrinsicsError> {
        Extrinsics::new(bytes, self.client)
    }
}
+178
View File
@@ -0,0 +1,178 @@
use super::extrinsic_info::{AnyExtrinsicInfo, with_info};
use crate::error::ExtrinsicCallError;
use crate::utils::Either;
use scale_info_legacy::{LookupName, TypeRegistrySet};
/// This represents the call data in the extrinsic.
pub struct ExtrinsicCall<'extrinsics, 'atblock> {
    // The full encoded extrinsic bytes; the call data is a sub-range of these.
    all_bytes: &'extrinsics [u8],
    // Decode info (legacy or current metadata flavour) for this extrinsic.
    info: &'extrinsics AnyExtrinsicInfo<'atblock>,
}
impl<'extrinsics, 'atblock> ExtrinsicCall<'extrinsics, 'atblock> {
    pub(crate) fn new(
        all_bytes: &'extrinsics [u8],
        info: &'extrinsics AnyExtrinsicInfo<'atblock>,
    ) -> Self {
        Self { all_bytes, info }
    }
    /// The index of the pallet that this call is for.
    pub fn pallet_index(&self) -> u8 {
        // `with_info!` dispatches over the legacy/current variants of the info.
        with_info!(&self.info => info.info.pallet_index())
    }
    /// The name of the pallet that this call is for.
    pub fn pallet_name(&self) -> &str {
        with_info!(&self.info => info.info.pallet_name())
    }
    /// The index of this call.
    pub fn index(&self) -> u8 {
        with_info!(&self.info => info.info.call_index())
    }
    /// The name of this call.
    pub fn name(&self) -> &str {
        with_info!(&self.info => info.info.call_name())
    }
    /// Get the raw bytes for the entire call, which includes the pallet and call index
    /// bytes as well as the encoded arguments for each of the fields.
    pub fn bytes(&self) -> &'extrinsics [u8] {
        with_info!(&self.info => &self.all_bytes[info.info.call_data_range()])
    }
    /// Work with the fields in this call.
    pub fn fields(&self) -> ExtrinsicCallFields<'extrinsics, 'atblock> {
        ExtrinsicCallFields::new(self.all_bytes, self.info)
    }
}
/// This represents the fields of the call.
pub struct ExtrinsicCallFields<'extrinsics, 'atblock> {
    // The full encoded extrinsic bytes; field data lives in sub-ranges of these.
    all_bytes: &'extrinsics [u8],
    // Decode info (legacy or current metadata flavour) for this extrinsic.
    info: &'extrinsics AnyExtrinsicInfo<'atblock>,
}
impl<'extrinsics, 'atblock> ExtrinsicCallFields<'extrinsics, 'atblock> {
    pub(crate) fn new(
        all_bytes: &'extrinsics [u8],
        info: &'extrinsics AnyExtrinsicInfo<'atblock>,
    ) -> Self {
        Self { all_bytes, info }
    }
    /// Return the bytes representing the fields stored in this extrinsic.
    ///
    /// # Note
    ///
    /// This is a subset of [`ExtrinsicCall::bytes`] that does not include the
    /// first two bytes that denote the pallet index and the variant index.
    pub fn bytes(&self) -> &'extrinsics [u8] {
        with_info!(&self.info => &self.all_bytes[info.info.call_data_args_range()])
    }
    /// Iterate over each of the fields of the extrinsic call data.
    pub fn iter(&self) -> impl Iterator<Item = ExtrinsicCallField<'extrinsics, 'atblock>> {
        // The legacy/current arms produce different iterator types, so we wrap
        // them in `Either` to return a single concrete type.
        match &self.info {
            AnyExtrinsicInfo::Legacy(info) => {
                Either::A(info.info.call_data().map(|named_arg| ExtrinsicCallField {
                    field_bytes: &self.all_bytes[named_arg.range()],
                    info: AnyExtrinsicCallFieldInfo::Legacy(ExtrinsicCallFieldInfo {
                        info: named_arg,
                        resolver: info.resolver,
                    }),
                }))
            }
            AnyExtrinsicInfo::Current(info) => {
                Either::B(info.info.call_data().map(|named_arg| ExtrinsicCallField {
                    field_bytes: &self.all_bytes[named_arg.range()],
                    info: AnyExtrinsicCallFieldInfo::Current(ExtrinsicCallFieldInfo {
                        info: named_arg,
                        resolver: info.resolver,
                    }),
                }))
            }
        }
    }
    /// Attempt to decode the fields into the given type.
    pub fn decode<T: scale_decode::DecodeAsFields>(&self) -> Result<T, ExtrinsicCallError> {
        with_info!(&self.info => {
            let cursor = &mut self.bytes();
            let mut fields = &mut info.info.call_data().map(|named_arg| {
                scale_decode::Field::new(named_arg.ty().clone(), Some(named_arg.name()))
            });
            let decoded = T::decode_as_fields(cursor, &mut fields, info.resolver)
                .map_err(|e| ExtrinsicCallError::FieldsDecodeError { reason: e })?;
            // Any bytes left over mean the decode didn't line up with the data.
            if !cursor.is_empty() {
                return Err(ExtrinsicCallError::FieldsLeftoverBytes {
                    leftover_bytes: cursor.to_vec(),
                })
            }
            Ok(decoded)
        })
    }
}
/// A single named field of an extrinsic call, plus the info needed to decode it.
pub struct ExtrinsicCallField<'extrinsics, 'atblock> {
    // The encoded bytes for just this field.
    field_bytes: &'extrinsics [u8],
    info: AnyExtrinsicCallFieldInfo<'extrinsics, 'atblock>,
}
// Field decode info for either legacy (pre-V14) or current metadata.
enum AnyExtrinsicCallFieldInfo<'extrinsics, 'atblock> {
    Legacy(ExtrinsicCallFieldInfo<'extrinsics, 'atblock, LookupName, TypeRegistrySet<'atblock>>),
    Current(ExtrinsicCallFieldInfo<'extrinsics, 'atblock, u32, scale_info::PortableRegistry>),
}
// Pairs a field's type information with the resolver able to look its type up.
struct ExtrinsicCallFieldInfo<'extrinsics, 'atblock, TypeId, Resolver> {
    info: &'extrinsics frame_decode::extrinsics::NamedArg<'atblock, TypeId>,
    resolver: &'atblock Resolver,
}
// Run the same expression against whichever variant (legacy/current) is present.
macro_rules! with_call_field_info {
    (&$self:ident.$info:ident => $fn:expr) => {
        #[allow(clippy::clone_on_copy)]
        match &$self.$info {
            AnyExtrinsicCallFieldInfo::Legacy($info) => $fn,
            AnyExtrinsicCallFieldInfo::Current($info) => $fn,
        }
    };
}
impl<'extrinsics, 'atblock> ExtrinsicCallField<'extrinsics, 'atblock> {
    /// Get the raw bytes for this field.
    pub fn bytes(&self) -> &'extrinsics [u8] {
        self.field_bytes
    }
    /// Get the name of this field.
    pub fn name(&self) -> &'extrinsics str {
        with_call_field_info!(&self.info => info.info.name())
    }
    /// Attempt to decode the value of this field into the given type.
    pub fn decode<T: scale_decode::DecodeAsType>(&self) -> Result<T, ExtrinsicCallError> {
        with_call_field_info!(&self.info => {
            let cursor = &mut &*self.field_bytes;
            let decoded = T::decode_as_type(cursor, info.info.ty().clone(), info.resolver)
                .map_err(|e| ExtrinsicCallError::FieldDecodeError {
                    name: info.info.name().to_string(),
                    reason: e,
                })?;
            // Any bytes left over mean the decode didn't consume the whole field.
            if !cursor.is_empty() {
                return Err(ExtrinsicCallError::FieldLeftoverBytes {
                    name: info.info.name().to_string(),
                    leftover_bytes: cursor.to_vec(),
                });
            }
            Ok(decoded)
        })
    }
}
+109
View File
@@ -0,0 +1,109 @@
use crate::error::ExtrinsicsError;
use frame_metadata::RuntimeMetadata;
use scale_info_legacy::{LookupName, TypeRegistrySet};
// Extrinsic information for modern or legacy extrinsics.
#[allow(clippy::large_enum_variant)]
pub enum AnyExtrinsicInfo<'atblock> {
    Legacy(ExtrinsicInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>),
    Current(ExtrinsicInfo<'atblock, u32, scale_info::PortableRegistry>),
}
impl<'atblock> AnyExtrinsicInfo<'atblock> {
    /// For a slice of extrinsics, return a vec of information about each one.
    ///
    /// Dispatches on the metadata version: V8-V13 decode against the provided
    /// legacy types, V14-V16 against the type registry embedded in the metadata.
    pub fn new(
        bytes: &[Vec<u8>],
        metadata: &'atblock RuntimeMetadata,
        legacy_types: &'atblock TypeRegistrySet<'atblock>,
    ) -> Result<Vec<Self>, ExtrinsicsError> {
        let infos = match metadata {
            RuntimeMetadata::V8(m) => extrinsic_info_inner(bytes, m, legacy_types),
            RuntimeMetadata::V9(m) => extrinsic_info_inner(bytes, m, legacy_types),
            RuntimeMetadata::V10(m) => extrinsic_info_inner(bytes, m, legacy_types),
            RuntimeMetadata::V11(m) => extrinsic_info_inner(bytes, m, legacy_types),
            RuntimeMetadata::V12(m) => extrinsic_info_inner(bytes, m, legacy_types),
            RuntimeMetadata::V13(m) => extrinsic_info_inner(bytes, m, legacy_types),
            RuntimeMetadata::V14(m) => extrinsic_info_inner(bytes, m, &m.types),
            RuntimeMetadata::V15(m) => extrinsic_info_inner(bytes, m, &m.types),
            RuntimeMetadata::V16(m) => extrinsic_info_inner(bytes, m, &m.types),
            unknown => {
                return Err(ExtrinsicsError::UnsupportedMetadataVersion {
                    version: unknown.version(),
                });
            }
        }?;
        // Note: nested fn; defined after use above, which is fine in Rust.
        // Generic over the metadata/type-resolver pair so that one body serves
        // all of the metadata versions matched above.
        fn extrinsic_info_inner<'atblock, Info, Resolver>(
            bytes: &[Vec<u8>],
            args_info: &'atblock Info,
            type_resolver: &'atblock Resolver,
        ) -> Result<Vec<AnyExtrinsicInfo<'atblock>>, ExtrinsicsError>
        where
            Info: frame_decode::extrinsics::ExtrinsicTypeInfo,
            Info::TypeId: Clone + core::fmt::Display + core::fmt::Debug + Send + Sync + 'static,
            Resolver: scale_type_resolver::TypeResolver<TypeId = Info::TypeId>,
            AnyExtrinsicInfo<'atblock>: From<ExtrinsicInfo<'atblock, Info::TypeId, Resolver>>,
        {
            bytes
                .iter()
                .enumerate()
                .map(|(index, bytes)| {
                    let cursor = &mut &**bytes;
                    let extrinsic_info = frame_decode::extrinsics::decode_extrinsic(
                        cursor,
                        args_info,
                        type_resolver,
                    )
                    .map_err(|reason| ExtrinsicsError::DecodeError { index, reason })?;
                    // Any bytes left over mean the decode didn't line up with the data.
                    if !cursor.is_empty() {
                        return Err(ExtrinsicsError::LeftoverBytes {
                            index,
                            leftover_bytes: cursor.to_vec(),
                        });
                    }
                    Ok(ExtrinsicInfo {
                        info: extrinsic_info,
                        resolver: type_resolver,
                    }
                    .into())
                })
                .collect()
        }
        Ok(infos)
    }
}
// Allows the generic helper above to produce the Legacy variant via `.into()`.
impl<'atblock> From<ExtrinsicInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>>
    for AnyExtrinsicInfo<'atblock>
{
    fn from(info: ExtrinsicInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>) -> Self {
        AnyExtrinsicInfo::Legacy(info)
    }
}
// Allows the generic helper above to produce the Current variant via `.into()`.
impl<'atblock> From<ExtrinsicInfo<'atblock, u32, scale_info::PortableRegistry>>
    for AnyExtrinsicInfo<'atblock>
{
    fn from(info: ExtrinsicInfo<'atblock, u32, scale_info::PortableRegistry>) -> Self {
        AnyExtrinsicInfo::Current(info)
    }
}
// Extrinsic information for a specific type ID and resolver type.
pub struct ExtrinsicInfo<'atblock, TypeId, Resolver> {
    pub info: frame_decode::extrinsics::Extrinsic<'atblock, TypeId>,
    pub resolver: &'atblock Resolver,
}
// Run the same expression against whichever variant (legacy/current) is present.
macro_rules! with_info {
    (&$self:ident.$info:ident => $fn:expr) => {
        #[allow(clippy::clone_on_copy)]
        match &$self.$info {
            AnyExtrinsicInfo::Legacy($info) => $fn,
            AnyExtrinsicInfo::Current($info) => $fn,
        }
    };
}
pub(crate) use with_info;
@@ -0,0 +1,213 @@
use super::extrinsic_info::AnyExtrinsicInfo;
use crate::error::ExtrinsicTransactionExtensionError;
use crate::utils::Either;
use frame_decode::helpers::scale_decode;
use scale_info_legacy::{LookupName, TypeRegistrySet};
// Extrinsic extensions information for modern or legacy extrinsics.
enum AnyExtrinsicExtensionsInfo<'extrinsics, 'atblock> {
    Legacy(ExtrinsicExtensionsInfo<'extrinsics, 'atblock, LookupName, TypeRegistrySet<'atblock>>),
    Current(ExtrinsicExtensionsInfo<'extrinsics, 'atblock, u32, scale_info::PortableRegistry>),
}
// Pairs the extensions' type information with the resolver able to look types up.
struct ExtrinsicExtensionsInfo<'extrinsics, 'atblock, TypeId, Resolver> {
    info: &'extrinsics frame_decode::extrinsics::ExtrinsicExtensions<'atblock, TypeId>,
    resolver: &'atblock Resolver,
}
/// This represents the transaction extensions of an extrinsic.
pub struct ExtrinsicTransactionExtensions<'extrinsics, 'atblock> {
    // The full encoded extrinsic bytes; extensions live in sub-ranges of these.
    all_bytes: &'extrinsics [u8],
    info: AnyExtrinsicExtensionsInfo<'extrinsics, 'atblock>,
}
// Run the same expression against whichever variant (legacy/current) is present.
macro_rules! with_extensions_info {
    (&$self:ident.$info:ident => $fn:expr) => {
        #[allow(clippy::clone_on_copy)]
        match &$self.$info {
            AnyExtrinsicExtensionsInfo::Legacy($info) => $fn,
            AnyExtrinsicExtensionsInfo::Current($info) => $fn,
        }
    };
}
impl<'extrinsics, 'atblock> ExtrinsicTransactionExtensions<'extrinsics, 'atblock> {
    // Returns None when `transaction_extension_payload()` is None for the
    // extrinsic (presumably extrinsics without a signed/extension payload —
    // confirm against frame-decode docs).
    pub(crate) fn new(
        all_bytes: &'extrinsics [u8],
        info: &'extrinsics AnyExtrinsicInfo<'atblock>,
    ) -> Option<Self> {
        match info {
            AnyExtrinsicInfo::Current(info) => {
                let extension_info = info.info.transaction_extension_payload()?;
                Some(Self {
                    all_bytes,
                    info: AnyExtrinsicExtensionsInfo::Current(ExtrinsicExtensionsInfo {
                        info: extension_info,
                        resolver: info.resolver,
                    }),
                })
            }
            AnyExtrinsicInfo::Legacy(info) => {
                let extension_info = info.info.transaction_extension_payload()?;
                Some(Self {
                    all_bytes,
                    info: AnyExtrinsicExtensionsInfo::Legacy(ExtrinsicExtensionsInfo {
                        info: extension_info,
                        resolver: info.resolver,
                    }),
                })
            }
        }
    }
    /// Get the raw bytes for all of the transaction extensions.
    pub fn bytes(&self) -> &'extrinsics [u8] {
        with_extensions_info!(&self.info => &self.all_bytes[info.info.range()])
    }
    /// Iterate over each of the transaction extensions in this extrinsic.
    pub fn iter(
        &self,
    ) -> impl Iterator<Item = ExtrinsicTransactionExtension<'extrinsics, 'atblock>> {
        // The legacy/current arms produce different iterator types, so we wrap
        // them in `Either` to return a single concrete type.
        match &self.info {
            AnyExtrinsicExtensionsInfo::Legacy(extension_info) => {
                let iter = extension_info
                    .info
                    .iter()
                    .map(|s| ExtrinsicTransactionExtension {
                        bytes: &self.all_bytes[s.range()],
                        info: ExtrinsicExtensionInfo {
                            name: s.name(),
                            type_id: s.ty(),
                            resolver: extension_info.resolver,
                        }
                        .into(),
                    });
                Either::A(iter)
            }
            AnyExtrinsicExtensionsInfo::Current(extension_info) => {
                let iter = extension_info
                    .info
                    .iter()
                    .map(|s| ExtrinsicTransactionExtension {
                        bytes: &self.all_bytes[s.range()],
                        info: ExtrinsicExtensionInfo {
                            name: s.name(),
                            type_id: s.ty(),
                            resolver: extension_info.resolver,
                        }
                        .into(),
                    });
                Either::B(iter)
            }
        }
    }
    /// Attempt to decode the transaction extensions into a type where each field name is the name of the transaction
    /// extension and the field value is the decoded extension.
    pub fn decode<T: scale_decode::DecodeAsFields>(
        &self,
    ) -> Result<T, ExtrinsicTransactionExtensionError> {
        with_extensions_info!(&self.info => {
            let cursor = &mut self.bytes();
            let mut fields = &mut info.info.iter().map(|named_arg| {
                scale_decode::Field::new(named_arg.ty().clone(), Some(named_arg.name()))
            });
            let decoded = T::decode_as_fields(cursor, &mut fields, info.resolver)
                .map_err(|e| ExtrinsicTransactionExtensionError::AllDecodeError { reason: e })?;
            // Any bytes left over mean the decode didn't line up with the data.
            if !cursor.is_empty() {
                return Err(ExtrinsicTransactionExtensionError::AllLeftoverBytes {
                    leftover_bytes: cursor.to_vec(),
                })
            }
            Ok(decoded)
        })
    }
}
// Extrinsic single extension information for modern or legacy extrinsics.
enum AnyExtrinsicExtensionInfo<'extrinsics, 'atblock> {
    Legacy(ExtrinsicExtensionInfo<'extrinsics, 'atblock, LookupName, TypeRegistrySet<'atblock>>),
    Current(ExtrinsicExtensionInfo<'extrinsics, 'atblock, u32, scale_info::PortableRegistry>),
}
// Allows `.into()` in the iter() arms above to pick the Legacy variant.
impl<'extrinsics, 'atblock>
    From<ExtrinsicExtensionInfo<'extrinsics, 'atblock, LookupName, TypeRegistrySet<'atblock>>>
    for AnyExtrinsicExtensionInfo<'extrinsics, 'atblock>
{
    fn from(
        info: ExtrinsicExtensionInfo<'extrinsics, 'atblock, LookupName, TypeRegistrySet<'atblock>>,
    ) -> Self {
        AnyExtrinsicExtensionInfo::Legacy(info)
    }
}
// Allows `.into()` in the iter() arms above to pick the Current variant.
impl<'extrinsics, 'atblock>
    From<ExtrinsicExtensionInfo<'extrinsics, 'atblock, u32, scale_info::PortableRegistry>>
    for AnyExtrinsicExtensionInfo<'extrinsics, 'atblock>
{
    fn from(
        info: ExtrinsicExtensionInfo<'extrinsics, 'atblock, u32, scale_info::PortableRegistry>,
    ) -> Self {
        AnyExtrinsicExtensionInfo::Current(info)
    }
}
// The name, type and resolver needed to decode one transaction extension.
struct ExtrinsicExtensionInfo<'extrinsics, 'atblock, TypeId, Resolver> {
    name: &'extrinsics str,
    type_id: &'extrinsics TypeId,
    resolver: &'atblock Resolver,
}
// Run the same expression against whichever variant (legacy/current) is present.
macro_rules! with_extension_info {
    (&$self:ident.$info:ident => $fn:expr) => {
        #[allow(clippy::clone_on_copy)]
        match &$self.$info {
            AnyExtrinsicExtensionInfo::Legacy($info) => $fn,
            AnyExtrinsicExtensionInfo::Current($info) => $fn,
        }
    };
}
/// This represents a single transaction extension in an extrinsic.
pub struct ExtrinsicTransactionExtension<'extrinsics, 'atblock> {
    // The bytes of just this extension within the extrinsic.
    bytes: &'extrinsics [u8],
    // Name and type information needed to decode those bytes.
    info: AnyExtrinsicExtensionInfo<'extrinsics, 'atblock>,
}
impl<'extrinsics, 'atblock> ExtrinsicTransactionExtension<'extrinsics, 'atblock> {
    /// The raw bytes of this transaction extension.
    pub fn bytes(&self) -> &'extrinsics [u8] {
        self.bytes
    }

    /// The name/identifier of this transaction extension.
    pub fn name(&self) -> &'extrinsics str {
        with_extension_info!(&self.info => info.name)
    }

    /// Decode the bytes of this transaction extension into a type that implements
    /// `scale_decode::DecodeAsType`.
    pub fn decode<T: scale_decode::DecodeAsType>(
        &self,
    ) -> Result<T, ExtrinsicTransactionExtensionError> {
        with_extension_info!(&self.info => {
            let mut input = self.bytes;
            let decoded = T::decode_as_type(&mut input, info.type_id.clone(), info.resolver)
                .map_err(|reason| ExtrinsicTransactionExtensionError::DecodeError {
                    name: info.name.to_string(),
                    reason
                })?;
            // Bytes left over after decoding indicate that the wrong target
            // type was used for this extension.
            if !input.is_empty() {
                return Err(ExtrinsicTransactionExtensionError::LeftoverBytes {
                    name: info.name.to_string(),
                    leftover_bytes: input.to_vec(),
                });
            }
            Ok(decoded)
        })
    }
}
+115
View File
@@ -0,0 +1,115 @@
use super::extrinsic_call::ExtrinsicCall;
use super::extrinsic_info::{AnyExtrinsicInfo, with_info};
use super::extrinsic_transaction_extensions::ExtrinsicTransactionExtensions;
use crate::client::OfflineClientAtBlockT;
use crate::config::Config;
use crate::error::ExtrinsicsError;
/// This represents some extrinsics in a block, and carries everything that we need to decode information out of them.
pub struct Extrinsics<'atblock> {
    // The raw bytes of each extrinsic in the block.
    bytes: Vec<Vec<u8>>,
    // Each index in this vec should line up with one index in the above vec.
    infos: Vec<AnyExtrinsicInfo<'atblock>>,
}
impl<'atblock> Extrinsics<'atblock> {
    // Hides the messy logic needed to decode extrinsics into a consistent
    // output, regardless of whether we have current or legacy metadata.
    pub(crate) fn new<'client: 'atblock, T, Client>(
        bytes: Vec<Vec<u8>>,
        client: &'atblock Client,
    ) -> Result<Self, ExtrinsicsError>
    where
        T: Config + 'client,
        Client: OfflineClientAtBlockT<'client, T>,
    {
        let infos = AnyExtrinsicInfo::new(&bytes, client.metadata(), client.legacy_types())?;
        Ok(Self { bytes, infos })
    }

    // An empty set of extrinsics.
    pub(crate) fn empty() -> Self {
        Extrinsics {
            bytes: Vec::new(),
            infos: Vec::new(),
        }
    }

    /// The number of extrinsics in this block.
    pub fn len(&self) -> usize {
        self.bytes.len()
    }

    /// Returns `true` if there are no extrinsics in this block.
    pub fn is_empty(&self) -> bool {
        self.bytes.is_empty()
    }

    /// Iterate over the extrinsics in this block.
    pub fn iter(&self) -> impl Iterator<Item = Extrinsic<'_, 'atblock>> {
        self.bytes
            .iter()
            .zip(&self.infos)
            .enumerate()
            .map(|(idx, (bytes, info))| Extrinsic { idx, bytes, info })
    }
}
/// This represents an extrinsic, and carries everything that we need to decode information out of it.
pub struct Extrinsic<'extrinsics, 'atblock> {
    // The index of this extrinsic within its block.
    idx: usize,
    // The raw bytes of this extrinsic.
    bytes: &'extrinsics [u8],
    // Information used to locate and decode the parts of this extrinsic.
    info: &'extrinsics AnyExtrinsicInfo<'atblock>,
}
impl<'extrinsics, 'atblock> Extrinsic<'extrinsics, 'atblock> {
    /// The index of this extrinsic in the block.
    pub fn index(&self) -> usize {
        self.idx
    }

    /// The raw bytes of this extrinsic.
    pub fn bytes(&self) -> &'extrinsics [u8] {
        self.bytes
    }

    /// Whether this extrinsic carries a signature.
    pub fn is_signed(&self) -> bool {
        with_info!(&self.info => info.info.is_signed())
    }

    /// Information about the call that this extrinsic is making.
    pub fn call(&self) -> ExtrinsicCall<'extrinsics, 'atblock> {
        ExtrinsicCall::new(self.bytes, self.info)
    }

    /// The bytes of the address that signed this extrinsic.
    ///
    /// # Note
    ///
    /// Returns `None` if the extrinsic is not signed.
    pub fn address_bytes(&self) -> Option<&'extrinsics [u8]> {
        with_info!(&self.info => {
            info.info
                .signature_payload()
                .map(|payload| &self.bytes[payload.address_range()])
        })
    }

    /// The bytes of the signature of this extrinsic.
    ///
    /// # Note
    ///
    /// Returns `None` if the extrinsic is not signed.
    pub fn signature_bytes(&self) -> Option<&'extrinsics [u8]> {
        with_info!(&self.info => {
            info.info
                .signature_payload()
                .map(|payload| &self.bytes[payload.signature_range()])
        })
    }

    /// Information about the transaction extensions of this extrinsic, or `None`
    /// if no transaction extension information is available.
    pub fn transaction_extensions(
        &self,
    ) -> Option<ExtrinsicTransactionExtensions<'extrinsics, 'atblock>> {
        ExtrinsicTransactionExtensions::new(self.bytes, self.info)
    }
}
// TODO add extrinsic.call() with .bytes, and .decode function to make it easy to decode call fields into Value or whatever.
// Then add this to the example. Make sure we can do everything that dot-block-decoder does easily.
+22
View File
@@ -0,0 +1,22 @@
//! `subxt-historic` is a library for working with non head-of-chain data on Substrate-based blockchains.
// TODO: Remove this when we're ready to release, and document everything!
#![allow(missing_docs)]
mod utils;
pub mod client;
pub mod config;
pub mod error;
pub mod extrinsics;
pub mod storage;
pub use client::{OfflineClient, OnlineClient};
pub use config::polkadot::PolkadotConfig;
pub use config::substrate::SubstrateConfig;
pub use error::Error;
/// External types and crates that may be useful.
pub mod ext {
    // Re-exported so users can consume the streams returned by this crate
    // without depending on `futures` directly.
    pub use futures::stream::{Stream, StreamExt};
}
+411
View File
@@ -0,0 +1,411 @@
mod storage_entry;
mod storage_info;
mod storage_key;
mod storage_value;
use crate::client::{OfflineClientAtBlockT, OnlineClientAtBlockT};
use crate::config::Config;
use crate::error::{StorageEntryIsNotAMap, StorageEntryIsNotAPlainValue, StorageError};
use crate::storage::storage_info::with_info;
use storage_info::AnyStorageInfo;
pub use storage_entry::StorageEntry;
pub use storage_key::{StorageHasher, StorageKey, StorageKeyPart};
pub use storage_value::StorageValue;
// We take how storage keys can be passed in from `frame-decode`, so re-export here.
pub use frame_decode::storage::{IntoStorageKeys, StorageKeys};
/// Work with storage.
pub struct StorageClient<'atblock, Client, T> {
    // Client used to access metadata and (when online) make RPC calls.
    client: &'atblock Client,
    // Ties this client to a specific chain `Config`.
    marker: std::marker::PhantomData<T>,
}
impl<'atblock, Client, T> StorageClient<'atblock, Client, T> {
/// Work with storage.
pub(crate) fn new(client: &'atblock Client) -> Self {
Self {
client,
marker: std::marker::PhantomData,
}
}
}
// Things that we can do offline with storage.
impl<'atblock, 'client: 'atblock, Client, T> StorageClient<'atblock, Client, T>
where
    T: Config + 'client,
    Client: OfflineClientAtBlockT<'client, T>,
{
    /// Select the storage entry you'd like to work with.
    pub fn entry(
        &self,
        pallet_name: impl Into<String>,
        storage_name: impl Into<String>,
    ) -> Result<StorageEntryClient<'atblock, Client, T>, StorageError> {
        let pallet_name = pallet_name.into();
        let storage_name = storage_name.into();
        let info = AnyStorageInfo::new(
            &pallet_name,
            &storage_name,
            self.client.metadata(),
            self.client.legacy_types(),
        )?;
        // Maps and plain values expose different interfaces, so hand back the
        // appropriate variant for this entry.
        let entry = if info.is_map() {
            StorageEntryClient::Map(StorageEntryMapClient {
                client: self.client,
                pallet_name,
                storage_name,
                info,
                marker: std::marker::PhantomData,
            })
        } else {
            StorageEntryClient::Plain(StorageEntryPlainClient {
                client: self.client,
                pallet_name,
                storage_name,
                info,
                marker: std::marker::PhantomData,
            })
        };
        Ok(entry)
    }

    /// Iterate over all of the storage entries listed in the metadata for the current block. This does **not** include well known
    /// storage entries like `:code` which are not listed in the metadata.
    pub fn entries(&self) -> impl Iterator<Item = StorageEntriesItem<'atblock, Client, T>> {
        let client = self.client;
        frame_decode::helpers::list_storage_entries_any(client.metadata()).map(move |entry| {
            StorageEntriesItem {
                entry,
                client,
                marker: std::marker::PhantomData,
            }
        })
    }
}
/// A single storage entry listed in the metadata for the current block. This is
/// what is handed back when iterating via `StorageClient::entries()`.
pub struct StorageEntriesItem<'atblock, Client, T> {
    // The pallet and entry name for this storage entry.
    entry: frame_decode::helpers::StorageEntry<'atblock>,
    // Client used to build an entry-specific client on demand.
    client: &'atblock Client,
    marker: std::marker::PhantomData<T>,
}
impl<'atblock, 'client: 'atblock, Client, T> StorageEntriesItem<'atblock, Client, T>
where
    T: Config + 'client,
    Client: OfflineClientAtBlockT<'client, T>,
{
    /// The name of the pallet that this storage entry lives in.
    pub fn pallet_name(&self) -> &str {
        self.entry.pallet()
    }

    /// The name of this storage entry.
    pub fn storage_name(&self) -> &str {
        self.entry.entry()
    }

    /// Extract the relevant storage information so that we can work with this entry.
    pub fn entry(&self) -> Result<StorageEntryClient<'atblock, Client, T>, StorageError> {
        let storage_client = StorageClient {
            client: self.client,
            marker: std::marker::PhantomData,
        };
        storage_client.entry(self.pallet_name().to_owned(), self.storage_name().to_owned())
    }
}
/// A client for working with a specific storage entry. This is an enum because the storage entry
/// might be either a map or a plain value, and each has a different interface.
pub enum StorageEntryClient<'atblock, Client, T> {
    /// The entry is a single plain value.
    Plain(StorageEntryPlainClient<'atblock, Client, T>),
    /// The entry is a map, requiring one or more keys to reach a value.
    Map(StorageEntryMapClient<'atblock, Client, T>),
}
impl<'atblock, Client, T> StorageEntryClient<'atblock, Client, T>
where
    T: Config + 'atblock,
    Client: OfflineClientAtBlockT<'atblock, T>,
{
    /// The name of the pallet that this storage entry lives in.
    pub fn pallet_name(&self) -> &str {
        match self {
            Self::Plain(inner) => &inner.pallet_name,
            Self::Map(inner) => &inner.pallet_name,
        }
    }

    /// The name of this storage entry.
    pub fn storage_name(&self) -> &str {
        match self {
            Self::Plain(inner) => &inner.storage_name,
            Self::Map(inner) => &inner.storage_name,
        }
    }

    /// Returns `true` if this storage entry is a plain value.
    pub fn is_plain(&self) -> bool {
        matches!(self, Self::Plain(_))
    }

    /// Returns `true` if this storage entry is a map.
    pub fn is_map(&self) -> bool {
        matches!(self, Self::Map(_))
    }

    /// If this storage entry is a plain value, return the client for working with it. Else return an error.
    pub fn into_plain(
        self,
    ) -> Result<StorageEntryPlainClient<'atblock, Client, T>, StorageEntryIsNotAPlainValue> {
        match self {
            Self::Plain(inner) => Ok(inner),
            Self::Map(_) => Err(StorageEntryIsNotAPlainValue {
                pallet_name: self.pallet_name().into(),
                storage_name: self.storage_name().into(),
            }),
        }
    }

    /// If this storage entry is a map, return the client for working with it. Else return an error.
    pub fn into_map(
        self,
    ) -> Result<StorageEntryMapClient<'atblock, Client, T>, StorageEntryIsNotAMap> {
        match self {
            Self::Map(inner) => Ok(inner),
            Self::Plain(_) => Err(StorageEntryIsNotAMap {
                pallet_name: self.pallet_name().into(),
                storage_name: self.storage_name().into(),
            }),
        }
    }
}
/// A client for working with a plain storage entry.
pub struct StorageEntryPlainClient<'atblock, Client, T> {
    // Client used to access metadata and (when online) make RPC calls.
    client: &'atblock Client,
    // The pallet that this storage entry lives in.
    pallet_name: String,
    // The name of this storage entry within the pallet.
    storage_name: String,
    // Type information needed to decode the value.
    info: AnyStorageInfo<'atblock>,
    marker: std::marker::PhantomData<T>,
}
impl<'atblock, Client, T> StorageEntryPlainClient<'atblock, Client, T>
where
    T: Config + 'atblock,
    Client: OfflineClientAtBlockT<'atblock, T>,
{
    /// The name of the pallet that this storage entry lives in.
    pub fn pallet_name(&self) -> &str {
        self.pallet_name.as_str()
    }

    /// The name of this storage entry.
    pub fn storage_name(&self) -> &str {
        self.storage_name.as_str()
    }
}
impl<'atblock, Client, T> StorageEntryPlainClient<'atblock, Client, T>
where
    T: Config + 'atblock,
    Client: OnlineClientAtBlockT<'atblock, T>,
{
    /// Fetch the value for this storage entry, or `None` if no value exists at its key.
    pub async fn fetch(&self) -> Result<Option<StorageValue<'_, 'atblock>>, StorageError> {
        let value_bytes = fetch(self.client, &self.key()).await?;
        Ok(value_bytes.map(|bytes| StorageValue::new(&self.info, bytes)))
    }

    /// The key for this storage entry.
    pub fn key(&self) -> [u8; 32] {
        // A plain entry lives directly at the hashed pallet/entry prefix.
        frame_decode::storage::encode_prefix(self.pallet_name.as_str(), self.storage_name.as_str())
    }
}
/// A client for working with a storage entry that is a map.
pub struct StorageEntryMapClient<'atblock, Client, T> {
    // Client used to access metadata and (when online) make RPC calls.
    client: &'atblock Client,
    // The pallet that this storage entry lives in.
    pallet_name: String,
    // The name of this storage entry within the pallet.
    storage_name: String,
    // Key/hasher/value type information needed to encode keys and decode values.
    info: AnyStorageInfo<'atblock>,
    marker: std::marker::PhantomData<T>,
}
impl<'atblock, Client, T> StorageEntryMapClient<'atblock, Client, T>
where
    T: Config + 'atblock,
    Client: OfflineClientAtBlockT<'atblock, T>,
{
    /// The name of the pallet that this storage entry lives in.
    pub fn pallet_name(&self) -> &str {
        self.pallet_name.as_str()
    }

    /// The name of this storage entry.
    pub fn storage_name(&self) -> &str {
        self.storage_name.as_str()
    }
}
impl<'atblock, Client, T> StorageEntryMapClient<'atblock, Client, T>
where
    T: Config + 'atblock,
    Client: OnlineClientAtBlockT<'atblock, T>,
{
    /// Fetch a specific key in this map. If the number of keys provided is not equal
    /// to the number of keys required to fetch a single value from the map, then an error
    /// will be emitted.
    pub async fn fetch<Keys: IntoStorageKeys>(
        &self,
        keys: Keys,
    ) -> Result<Option<StorageValue<'_, 'atblock>>, StorageError> {
        // The number of key parts needed to address a single value in this map.
        let expected_num_keys = with_info!(info = &self.info => {
            info.info.keys.len()
        });
        if expected_num_keys != keys.num_keys() {
            return Err(StorageError::WrongNumberOfKeysProvided {
                num_keys_provided: keys.num_keys(),
                num_keys_expected: expected_num_keys,
            });
        }
        let key_bytes = self.key(keys)?;
        fetch(self.client, &key_bytes)
            .await
            .map(|v| v.map(|bytes| StorageValue::new(&self.info, bytes)))
    }
    /// Iterate over the values underneath the provided keys.
    pub async fn iter<Keys: IntoStorageKeys>(
        &self,
        keys: Keys,
    ) -> Result<
        impl futures::Stream<Item = Result<StorageEntry<'_, 'atblock>, StorageError>> + Unpin,
        StorageError,
    > {
        use futures::stream::StreamExt;
        use subxt_rpcs::methods::chain_head::{
            ArchiveStorageEvent, StorageQuery, StorageQueryType,
        };
        let block_hash = self.client.block_hash();
        // A partial key is fine here: we ask the node for every value
        // descending from it.
        let key_bytes = self.key(keys)?;
        let items = std::iter::once(StorageQuery {
            key: &*key_bytes,
            query_type: StorageQueryType::DescendantsValues,
        });
        let sub = self
            .client
            .rpc_methods()
            .archive_v1_storage(block_hash.into(), items, None)
            .await
            .map_err(|e| StorageError::RpcError { reason: e })?;
        // Map raw storage events into decoded entries, ending the stream when
        // the node signals that it's done and surfacing any errors as items.
        let sub = sub.filter_map(async |item| {
            let item = match item {
                Ok(ArchiveStorageEvent::Item(item)) => item,
                Ok(ArchiveStorageEvent::Error(err)) => {
                    return Some(Err(StorageError::StorageEventError { reason: err.error }));
                }
                Ok(ArchiveStorageEvent::Done) => return None,
                Err(e) => return Some(Err(StorageError::RpcError { reason: e })),
            };
            item.value
                .map(|value| Ok(StorageEntry::new(&self.info, item.key.0, value.0)))
        });
        Ok(Box::pin(sub))
    }
    // Encode a storage key for this storage entry to bytes. The key can be a partial key
    // (i.e there are still multiple values below it) or a complete key that points to a specific value.
    //
    // Dev note: We don't have any functions that can take an already-encoded key and fetch an entry from
    // it yet, so we don't expose this. If we did expose it, we might want to return some struct that wraps
    // the key bytes and some metadata about them. Or maybe just fetch_raw and iter_raw.
    fn key<Keys: IntoStorageKeys>(&self, keys: Keys) -> Result<Vec<u8>, StorageError> {
        with_info!(info = &self.info => {
            let mut key_bytes = Vec::new();
            frame_decode::storage::encode_storage_key_with_info_to(
                &self.pallet_name,
                &self.storage_name,
                keys,
                &info.info,
                info.resolver,
                &mut key_bytes,
            ).map_err(|e| StorageError::KeyEncodeError { reason: e })?;
            Ok(key_bytes)
        })
    }
}
// Fetch a single storage value by its key, returning `None` if no value
// exists at that key.
async fn fetch<'atblock, Client, T>(
    client: &Client,
    key_bytes: &[u8],
) -> Result<Option<Vec<u8>>, StorageError>
where
    T: Config + 'atblock,
    Client: OnlineClientAtBlockT<'atblock, T>,
{
    use subxt_rpcs::methods::chain_head::{ArchiveStorageEvent, StorageQuery, StorageQueryType};
    // Ask for exactly the one value at this key.
    let query = StorageQuery {
        key: key_bytes,
        query_type: StorageQueryType::Value,
    };
    let mut response_stream = client
        .rpc_methods()
        .archive_v1_storage(client.block_hash().into(), std::iter::once(query), None)
        .await
        .map_err(|e| StorageError::RpcError { reason: e })?;
    // Since we queried a single value, only the first event matters.
    let value = response_stream
        .next()
        .await
        .transpose()
        .map_err(|e| StorageError::RpcError { reason: e })?;
    // No value found.
    let Some(value) = value else {
        return Ok(None);
    };
    let item = match value {
        ArchiveStorageEvent::Item(item) => item,
        // if it errors, return the error:
        ArchiveStorageEvent::Error(err) => {
            return Err(StorageError::StorageEventError { reason: err.error });
        }
        // if it's done, it means no value was returned:
        ArchiveStorageEvent::Done => return Ok(None),
    };
    // This shouldn't happen, but if it does, the value we wanted wasn't found.
    if item.key.0 != key_bytes {
        return Ok(None);
    }
    // The bytes for the storage value. If this is None, then the API is misbehaving,
    // or no matching value was found.
    let Some(value_bytes) = item.value else {
        return Ok(None);
    };
    Ok(Some(value_bytes.0))
}
+48
View File
@@ -0,0 +1,48 @@
use super::storage_info::AnyStorageInfo;
use super::storage_key::StorageKey;
use super::storage_value::StorageValue;
use crate::error::{StorageKeyError, StorageValueError};
use scale_decode::DecodeAsType;
/// This represents a storage entry, which is a key-value pair in the storage.
pub struct StorageEntry<'entry, 'atblock> {
    // The raw bytes of the storage key.
    key: Vec<u8>,
    // This contains the storage information already:
    value: StorageValue<'entry, 'atblock>,
}
impl<'entry, 'atblock> StorageEntry<'entry, 'atblock> {
/// Create a new storage entry.
pub fn new(info: &'entry AnyStorageInfo<'atblock>, key: Vec<u8>, value: Vec<u8>) -> Self {
Self {
key,
value: StorageValue::new(info, value),
}
}
/// Get the raw bytes for this storage entry's key.
pub fn key_bytes(&self) -> &[u8] {
&self.key
}
/// Get the raw bytes for this storage entry's value.
pub fn value_bytes(&self) -> &[u8] {
self.value.bytes()
}
/// Consume this storage entry and return the raw bytes for the key and value.
pub fn into_key_and_value_bytes(self) -> (Vec<u8>, Vec<u8>) {
(self.key, self.value.into_bytes())
}
/// Decode the key for this storage entry. This gives back a type from which we can
/// decode specific parts of the key hash (where applicable).
pub fn decode_key(&'_ self) -> Result<StorageKey<'_, 'atblock>, StorageKeyError> {
StorageKey::new(self.value.info, &self.key)
}
/// Decode this storage value.
pub fn decode_value<T: DecodeAsType>(&self) -> Result<T, StorageValueError> {
self.value.decode::<T>()
}
}
+102
View File
@@ -0,0 +1,102 @@
use crate::error::StorageError;
use frame_decode::storage::StorageTypeInfo;
use frame_metadata::RuntimeMetadata;
use scale_info_legacy::{LookupName, TypeRegistrySet};
// Storage entry information for modern or legacy metadata. Legacy metadata
// describes types via `scale-info-legacy` lookup names resolved against an
// external registry; modern metadata uses `u32` IDs into the `scale_info`
// portable registry embedded in the metadata itself.
pub enum AnyStorageInfo<'atblock> {
    Legacy(StorageInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>),
    Current(StorageInfo<'atblock, u32, scale_info::PortableRegistry>),
}
impl<'atblock> AnyStorageInfo<'atblock> {
    /// Gather the information needed to encode keys and decode values for the
    /// given storage entry, based on which version of the metadata we have.
    pub fn new(
        pallet_name: &str,
        entry_name: &str,
        metadata: &'atblock RuntimeMetadata,
        legacy_types: &'atblock TypeRegistrySet<'atblock>,
    ) -> Result<Self, StorageError> {
        // Metadata before V14 carries no type registry of its own, so we rely
        // on the externally-provided legacy types; V14+ resolves types from
        // the registry embedded in the metadata itself (`m.types`).
        let info = match metadata {
            RuntimeMetadata::V8(m) => storage_info_inner(pallet_name, entry_name, m, legacy_types),
            RuntimeMetadata::V9(m) => storage_info_inner(pallet_name, entry_name, m, legacy_types),
            RuntimeMetadata::V10(m) => storage_info_inner(pallet_name, entry_name, m, legacy_types),
            RuntimeMetadata::V11(m) => storage_info_inner(pallet_name, entry_name, m, legacy_types),
            RuntimeMetadata::V12(m) => storage_info_inner(pallet_name, entry_name, m, legacy_types),
            RuntimeMetadata::V13(m) => storage_info_inner(pallet_name, entry_name, m, legacy_types),
            RuntimeMetadata::V14(m) => storage_info_inner(pallet_name, entry_name, m, &m.types),
            RuntimeMetadata::V15(m) => storage_info_inner(pallet_name, entry_name, m, &m.types),
            RuntimeMetadata::V16(m) => storage_info_inner(pallet_name, entry_name, m, &m.types),
            unknown => {
                return Err(StorageError::UnsupportedMetadataVersion {
                    version: unknown.version(),
                });
            }
        }?;
        // Shared by every match arm above; generic over the metadata version.
        fn storage_info_inner<'atblock, Info, Resolver>(
            pallet_name: &str,
            entry_name: &str,
            m: &'atblock Info,
            type_resolver: &'atblock Resolver,
        ) -> Result<AnyStorageInfo<'atblock>, StorageError>
        where
            Info: StorageTypeInfo,
            Resolver: scale_type_resolver::TypeResolver<TypeId = Info::TypeId>,
            AnyStorageInfo<'atblock>: From<StorageInfo<'atblock, Info::TypeId, Resolver>>,
        {
            m.get_storage_info(pallet_name, entry_name)
                .map(|frame_storage_info| {
                    let info = StorageInfo {
                        info: frame_storage_info,
                        resolver: type_resolver,
                    };
                    AnyStorageInfo::from(info)
                })
                .map_err(|e| StorageError::ExtractStorageInfoError {
                    reason: e.into_owned(),
                })
        }
        Ok(info)
    }
    /// Is the storage entry a map (ie something we'd provide extra keys to in
    /// order to access a value, or otherwise iterate over)?
    pub fn is_map(&self) -> bool {
        match self {
            AnyStorageInfo::Legacy(info) => !info.info.keys.is_empty(),
            AnyStorageInfo::Current(info) => !info.info.keys.is_empty(),
        }
    }
}
impl<'atblock> From<StorageInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>>
for AnyStorageInfo<'atblock>
{
fn from(info: StorageInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>) -> Self {
AnyStorageInfo::Legacy(info)
}
}
impl<'atblock> From<StorageInfo<'atblock, u32, scale_info::PortableRegistry>>
    for AnyStorageInfo<'atblock>
{
    // Wrap modern storage info into the version-agnostic enum.
    fn from(value: StorageInfo<'atblock, u32, scale_info::PortableRegistry>) -> Self {
        Self::Current(value)
    }
}
// The storage information extracted from some metadata, together with the
// type resolver needed to interpret the type IDs that it contains.
pub struct StorageInfo<'atblock, TypeId, Resolver> {
    pub info: frame_decode::storage::StorageInfo<TypeId>,
    pub resolver: &'atblock Resolver,
}
// Run `$fn` with `$info` bound to the inner `StorageInfo`, whichever variant
// is present. The expression is duplicated into both match arms because each
// arm binds a differently-typed `StorageInfo`.
macro_rules! with_info {
    ($info:ident = $original_info:expr => $fn:expr) => {{
        // Cloning type IDs may be a cheap copy (eg `u32`), hence the allow.
        #[allow(clippy::clone_on_copy)]
        let info = match $original_info {
            AnyStorageInfo::Legacy($info) => $fn,
            AnyStorageInfo::Current($info) => $fn,
        };
        info
    }};
}
pub(crate) use with_info;
+159
View File
@@ -0,0 +1,159 @@
use super::AnyStorageInfo;
use crate::{error::StorageKeyError, storage::storage_info::with_info};
use scale_info_legacy::{LookupName, TypeRegistrySet};
// This is part of our public interface.
pub use frame_decode::storage::StorageHasher;
// Storage key information for modern or legacy metadata, mirroring
// `AnyStorageInfo` but describing a decoded key rather than a whole entry.
enum AnyStorageKeyInfo<'atblock> {
    Legacy(StorageKeyInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>),
    Current(StorageKeyInfo<'atblock, u32, scale_info::PortableRegistry>),
}
impl<'atblock> From<StorageKeyInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>>
    for AnyStorageKeyInfo<'atblock>
{
    fn from(info: StorageKeyInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>) -> Self {
        AnyStorageKeyInfo::Legacy(info)
    }
}
impl<'atblock> From<StorageKeyInfo<'atblock, u32, scale_info::PortableRegistry>>
    for AnyStorageKeyInfo<'atblock>
{
    fn from(info: StorageKeyInfo<'atblock, u32, scale_info::PortableRegistry>) -> Self {
        AnyStorageKeyInfo::Current(info)
    }
}
// The decoded parts of a storage key, plus the resolver needed to decode any
// values embedded within those parts.
struct StorageKeyInfo<'atblock, TypeId, Resolver> {
    info: frame_decode::storage::StorageKey<TypeId>,
    resolver: &'atblock Resolver,
}
// Run `$fn` with `$info` bound to the inner `StorageKeyInfo`, whichever
// variant is present. The expression is duplicated into both match arms
// because each arm binds a differently-typed `StorageKeyInfo`.
macro_rules! with_key_info {
    ($info:ident = $original_info:expr => $fn:expr) => {{
        // Cloning type IDs may be a cheap copy (eg `u32`), hence the allow.
        #[allow(clippy::clone_on_copy)]
        let info = match $original_info {
            AnyStorageKeyInfo::Legacy($info) => $fn,
            AnyStorageKeyInfo::Current($info) => $fn,
        };
        info
    }};
}
/// This represents the different parts of a storage key.
pub struct StorageKey<'entry, 'atblock> {
    // Decoded information about each part of the key.
    info: AnyStorageKeyInfo<'atblock>,
    // The raw key bytes that the above information points into.
    bytes: &'entry [u8],
}
impl<'entry, 'atblock> StorageKey<'entry, 'atblock> {
    // Decode the given key bytes using the provided storage entry information.
    pub(crate) fn new(
        info: &AnyStorageInfo<'atblock>,
        bytes: &'entry [u8],
    ) -> Result<Self, StorageKeyError> {
        with_info!(info = info => {
            let cursor = &mut &*bytes;
            let storage_key_info = frame_decode::storage::decode_storage_key_with_info(
                cursor,
                &info.info,
                info.resolver,
            ).map_err(|e| {
                StorageKeyError::DecodeError { reason: e.map_type_id(|id| id.to_string()) }
            })?;
            // A valid key should be consumed in its entirety; leftover bytes
            // indicate that something is wrong with it.
            if !cursor.is_empty() {
                return Err(StorageKeyError::LeftoverBytes {
                    leftover_bytes: cursor.to_vec(),
                });
            }
            Ok(StorageKey {
                info: StorageKeyInfo {
                    info: storage_key_info,
                    resolver: info.resolver,
                }.into(),
                bytes,
            })
        })
    }
    /// Iterate over the parts of this storage key. Each part of a storage key corresponds to a
    /// single value that has been hashed.
    pub fn parts(&'_ self) -> impl ExactSizeIterator<Item = StorageKeyPart<'_, 'entry, 'atblock>> {
        // Parts are addressed by index; each `StorageKeyPart` borrows the
        // shared key info and bytes and picks out its own slice lazily.
        let parts_len = with_key_info!(info = &self.info => info.info.parts().len());
        (0..parts_len).map(move |index| StorageKeyPart {
            index,
            info: &self.info,
            bytes: self.bytes,
        })
    }
    /// Return the part of the storage key at the provided index, or `None` if the index is out of bounds.
    pub fn part(&self, index: usize) -> Option<StorageKeyPart<'_, 'entry, 'atblock>> {
        if index < self.parts().len() {
            Some(StorageKeyPart {
                index,
                info: &self.info,
                bytes: self.bytes,
            })
        } else {
            None
        }
    }
}
/// This represents a part of a storage key.
pub struct StorageKeyPart<'key, 'entry, 'atblock> {
    // Which part of the key this is.
    index: usize,
    // Decoded information about every part of the key.
    info: &'key AnyStorageKeyInfo<'atblock>,
    // The raw bytes of the whole storage key.
    bytes: &'entry [u8],
}
impl<'key, 'entry, 'atblock> StorageKeyPart<'key, 'entry, 'atblock> {
    /// Get the raw bytes for this part of the storage key.
    pub fn bytes(&self) -> &'entry [u8] {
        with_key_info!(info = &self.info => {
            let part = &info.info[self.index];
            let hash_range = part.hash_range();
            // For "concat"/"identity" style hashers, the value trails the
            // hash; if no value is present, use an empty range positioned at
            // the end of the hash so the combined range is just the hash.
            let value_range = part
                .value()
                .map(|v| v.range())
                .unwrap_or(std::ops::Range { start: hash_range.end, end: hash_range.end });
            let combined_range = std::ops::Range {
                start: hash_range.start,
                end: value_range.end,
            };
            &self.bytes[combined_range]
        })
    }
    /// Get the hasher that was used to construct this part of the storage key.
    pub fn hasher(&self) -> StorageHasher {
        with_key_info!(info = &self.info => info.info[self.index].hasher())
    }
    /// For keys that were produced using "concat" or "identity" hashers, the value
    /// is available as a part of the key hash, allowing us to decode it into anything
    /// implementing [`scale_decode::DecodeAsType`]. If the key was produced using a
    /// different hasher, this will return `None`.
    pub fn decode<T: scale_decode::DecodeAsType>(&self) -> Result<Option<T>, StorageKeyError> {
        with_key_info!(info = &self.info => {
            let part_info = &info.info[self.index];
            // No embedded value means the hasher was one-way; nothing to decode.
            let Some(value_info) = part_info.value() else {
                return Ok(None);
            };
            let value_bytes = &self.bytes[value_info.range()];
            let value_ty = value_info.ty().clone();
            let decoded_key_part = T::decode_as_type(
                &mut &*value_bytes,
                value_ty,
                info.resolver,
            ).map_err(|e| StorageKeyError::DecodePartError { index: self.index, reason: e })?;
            Ok(Some(decoded_key_part))
        })
    }
}
+48
View File
@@ -0,0 +1,48 @@
use super::storage_info::AnyStorageInfo;
use super::storage_info::with_info;
use crate::error::StorageValueError;
use scale_decode::DecodeAsType;
/// This represents a storage value.
pub struct StorageValue<'entry, 'atblock> {
    // Type information needed to decode the value (and, via `StorageEntry`,
    // the corresponding key).
    pub(crate) info: &'entry AnyStorageInfo<'atblock>,
    // The raw bytes of the value.
    bytes: Vec<u8>,
}
impl<'entry, 'atblock> StorageValue<'entry, 'atblock> {
/// Create a new storage value.
pub fn new(info: &'entry AnyStorageInfo<'atblock>, bytes: Vec<u8>) -> Self {
Self { info, bytes }
}
/// Get the raw bytes for this storage value.
pub fn bytes(&self) -> &[u8] {
&self.bytes
}
/// Consume this storage value and return the raw bytes.
pub fn into_bytes(self) -> Vec<u8> {
self.bytes
}
/// Decode this storage value.
pub fn decode<T: DecodeAsType>(&self) -> Result<T, StorageValueError> {
with_info!(info = &self.info => {
let cursor = &mut &*self.bytes;
let value = T::decode_as_type(
cursor,
info.info.value_id.clone(),
info.resolver,
).map_err(|e| StorageValueError::DecodeError { reason: e })?;
if !cursor.is_empty() {
return Err(StorageValueError::LeftoverBytes {
leftover_bytes: cursor.to_vec(),
});
}
Ok(value)
})
}
}
+5
View File
@@ -0,0 +1,5 @@
mod either;
mod range_map;
pub use either::Either;
pub use range_map::RangeMap;
+48
View File
@@ -0,0 +1,48 @@
// Generate an `Either`-style enum with the given variants, implementing
// `Iterator` and `futures::Stream` by delegating to whichever variant is
// present. Every variant must produce the same item type as the first.
macro_rules! either {
    ($name:ident( $fst:ident, $($variant:ident),* )) => {
        pub enum $name<$fst, $($variant),*> {
            $fst($fst),
            $($variant($variant),)*
        }
        impl<$fst, $($variant),*> Iterator for $name<$fst, $($variant),*>
        where
            $fst: Iterator,
            $($variant: Iterator<Item = $fst::Item>,)*
        {
            type Item = $fst::Item;
            fn next(&mut self) -> Option<Self::Item> {
                match self {
                    $name::$fst(inner) => inner.next(),
                    $( $name::$variant(inner) => inner.next(), )*
                }
            }
        }
        impl <$fst, $($variant),*> futures::stream::Stream for $name<$fst, $($variant),*>
        where
            $fst: futures::stream::Stream,
            $($variant: futures::stream::Stream<Item = $fst::Item>,)*
        {
            type Item = $fst::Item;
            fn poll_next(
                self: std::pin::Pin<&mut Self>,
                cx: &mut std::task::Context<'_>,
            ) -> std::task::Poll<Option<Self::Item>> {
                use std::pin::Pin;
                // SAFETY: This is a structural pin projection: we only re-pin
                // the inner value in place and never move it out of the Pin.
                unsafe {
                    match self.get_unchecked_mut() {
                        $name::$fst(inner) => Pin::new_unchecked(inner).poll_next(cx),
                        $( $name::$variant(inner) => Pin::new_unchecked(inner).poll_next(cx), )*
                    }
                }
            }
        }
    }
}
// A two-variant either type.
either!(Either(A, B));
+158
View File
@@ -0,0 +1,158 @@
use std::fmt::Display;
/// A map that associates ranges of keys with values.
#[derive(Debug, Clone)]
pub struct RangeMap<K, V> {
    // (range_start, range_end, value). This is
    // guaranteed to be sorted and have non-overlapping ranges.
    mapping: Vec<(K, K, V)>,
}
impl<K: Clone + Copy + Display + PartialOrd + Ord, V> RangeMap<K, V> {
    /// Build an empty [`RangeMap`] as a placeholder.
    pub fn empty() -> Self {
        RangeMap {
            mapping: Vec::new(),
        }
    }

    /// Build a [`RangeMap`].
    pub fn builder() -> RangeMapBuilder<K, V> {
        RangeMapBuilder {
            mapping: Vec::new(),
        }
    }

    /// Return the value whose half-open range `start..end` contains the given
    /// key, or `None` if no range contains it.
    pub fn get(&self, key: K) -> Option<&V> {
        // `mapping` is sorted by range start with no overlaps, so the only
        // candidate is the last range starting at or before `key`.
        // `partition_point` gives the index of the first range starting
        // *after* `key`; `checked_sub` handles `key` before all ranges.
        let next_idx = self.mapping.partition_point(|&(start, _, _)| start <= key);
        let (start, end, value) = self.mapping.get(next_idx.checked_sub(1)?)?;
        // Ranges are half-open: start inclusive, end exclusive.
        (key >= *start && key < *end).then_some(value)
    }
}
/// A builder for constructing a [`RangeMap`]. Use [`RangeMap::builder()`] to create one.
#[derive(Debug, Clone)]
pub struct RangeMapBuilder<K, V> {
    // Accumulated (range_start, range_end, value) entries; checked for
    // non-overlap on insertion and sorted when `build()` is called.
    mapping: Vec<(K, K, V)>,
}
impl<K: Clone + Copy + Display + PartialOrd + Ord, V> RangeMapBuilder<K, V> {
    /// Try to add a range of keys, mapping them to the given value.
    ///
    /// Returns an error if the range is empty or overlaps with an existing range.
    pub fn try_add_range(
        &mut self,
        start: K,
        end: K,
        val: V,
    ) -> Result<&mut Self, RangeMapError<K>> {
        // Normalize the range so that start <= end.
        let (start, end) = if start < end {
            (start, end)
        } else {
            (end, start)
        };
        if start == end {
            return Err(RangeMapError::EmptyRange(start));
        }
        // Two half-open ranges overlap iff each one starts before the other ends.
        if let Some(&(s, e, _)) = self
            .mapping
            .iter()
            .find(|&&(s, e, _)| (start < e && end > s))
        {
            return Err(RangeMapError::OverlappingRanges {
                proposed: (start, end),
                existing: (s, e),
            });
        }
        self.mapping.push((start, end, val));
        Ok(self)
    }
    /// Add a range of keys, mapping them to the given value.
    ///
    /// # Panics
    ///
    /// This method will panic if the range is empty or overlaps with an existing range.
    pub fn add_range(mut self, start: K, end: K, val: V) -> Self {
        if let Err(e) = self.try_add_range(start, end, val) {
            panic!("{e}")
        }
        self
    }
    /// Finish adding ranges and build the [`RangeMap`].
    pub fn build(mut self) -> RangeMap<K, V> {
        // Sorting by range start is what allows `RangeMap::get` to binary search.
        self.mapping.sort_by_key(|&(start, _, _)| start);
        RangeMap {
            mapping: self.mapping,
        }
    }
}
/// An error that can occur when calling [`RangeMapBuilder::try_add_range()`].
#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)]
pub enum RangeMapError<K: Display> {
    /// An error indicating that the proposed range is empty.
    #[error("Block range cannot be empty: start and end values must be different, but got {} for both", .0)]
    EmptyRange(K),
    /// An error indicating that the proposed range overlaps with an existing one.
    #[error("Overlapping block ranges are not allowed: proposed range is {}..{}, but we already have {}..{}", proposed.0, proposed.1, existing.0, existing.1)]
    OverlappingRanges { proposed: (K, K), existing: (K, K) },
}
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_rangemap_get() {
        let map = RangeMap::builder()
            .add_range(0, 100, 1)
            .add_range(100, 200, 2)
            .add_range(200, 300, 3)
            .build();

        // Lookups at range starts, in range interiors, and past the end.
        let cases = [
            (0, Some(&1)),
            (50, Some(&1)),
            (100, Some(&2)),
            (150, Some(&2)),
            (200, Some(&3)),
            (250, Some(&3)),
            (300, None),
        ];
        for (key, expected) in cases {
            assert_eq!(map.get(key), expected);
        }
    }

    #[test]
    fn test_rangemap_set() {
        let mut builder = RangeMap::builder()
            .add_range(0, 100, 1)
            .add_range(200, 300, 3);

        // Overlapping the end of an existing range is rejected.
        assert_eq!(
            builder.try_add_range(99, 130, 2).unwrap_err(),
            RangeMapError::OverlappingRanges {
                proposed: (99, 130),
                existing: (0, 100),
            }
        );
        // Overlapping the start of an existing range is rejected too.
        assert_eq!(
            builder.try_add_range(170, 201, 2).unwrap_err(),
            RangeMapError::OverlappingRanges {
                proposed: (170, 201),
                existing: (200, 300),
            }
        );
    }
}
+53
View File
@@ -14,6 +14,11 @@ use jsonrpsee::{
};
use serde_json::value::RawValue;
/// Construct a `jsonrpsee` RPC client with some sane defaults.
pub async fn client(url: &str) -> Result<Client, Error> {
jsonrpsee_helpers::client(url).await.map_err(|e| Error::Client(Box::new(e)))
}
struct Params(Option<Box<RawValue>>);
impl ToRpcParams for Params {
@@ -82,3 +87,51 @@ impl From<JsonrpseeError> for Error {
}
}
}
// helpers for a jsonrpsee specific RPC client.
#[cfg(all(feature = "jsonrpsee", feature = "native"))]
mod jsonrpsee_helpers {
    pub use jsonrpsee::{
        client_transport::ws::{self, EitherStream, Url, WsTransportClientBuilder},
        core::client::{Client, Error},
    };
    use tokio_util::compat::Compat;

    // Sender/receiver halves of the tokio-compat websocket transport.
    pub type Sender = ws::Sender<Compat<EitherStream>>;
    pub type Receiver = ws::Receiver<Compat<EitherStream>>;

    /// Build WS RPC client from URL
    pub async fn client(url: &str) -> Result<Client, Error> {
        let (tx, rx) = ws_transport(url).await?;
        let client = Client::builder()
            .max_buffer_capacity_per_subscription(4096)
            .build_with_tokio(tx, rx);
        Ok(client)
    }

    // Parse the URL and open the underlying websocket connection, mapping
    // any failure into a transport error.
    async fn ws_transport(url: &str) -> Result<(Sender, Receiver), Error> {
        let parsed = Url::parse(url).map_err(|e| Error::Transport(e.into()))?;
        let builder = WsTransportClientBuilder::default();
        builder
            .build(parsed)
            .await
            .map_err(|e| Error::Transport(e.into()))
    }
}
// helpers for a jsonrpsee specific RPC client.
#[cfg(all(feature = "jsonrpsee", feature = "web", target_arch = "wasm32"))]
mod jsonrpsee_helpers {
    pub use jsonrpsee::{
        client_transport::web,
        core::client::{Client, ClientBuilder, Error},
    };

    /// Build web RPC client from URL
    pub async fn client(url: &str) -> Result<Client, Error> {
        // Connect the wasm websocket transport, mapping failures into a
        // transport error.
        let connection = web::connect(url).await;
        let (sender, receiver) = connection.map_err(|e| Error::Transport(e.into()))?;
        let builder = ClientBuilder::default().max_buffer_capacity_per_subscription(4096);
        Ok(builder.build_with_wasm(sender, receiver))
    }
}
+5
View File
@@ -28,11 +28,13 @@
// With the `jsonrpsee` feature enabled, expose the jsonrpsee-backed client
// type and a helper to construct one from a URL.
crate::macros::cfg_jsonrpsee! {
    mod jsonrpsee_impl;
    pub use jsonrpsee::core::client::Client as JsonrpseeRpcClient;
    pub use jsonrpsee_impl::client as jsonrpsee_client;
}
// With the unstable light-client feature enabled, expose the light-client
// based RPC transport types.
crate::macros::cfg_unstable_light_client! {
    mod lightclient_impl;
    pub use subxt_lightclient::LightClientRpc as LightClientRpcClient;
    pub use subxt_lightclient::LightClient;
}
crate::macros::cfg_reconnecting_rpc_client! {
@@ -45,6 +47,9 @@ crate::macros::cfg_mock_rpc_client! {
pub use mock_rpc_client::MockRpcClient;
}
// Round-robin load balancing across a set of RPC clients.
pub mod round_robin_rpc_client;
pub use round_robin_rpc_client::RoundRobinRpcClient;

// The user-facing RPC client wrapper and the low-level client trait.
mod rpc_client;
mod rpc_client_t;
+94
View File
@@ -0,0 +1,94 @@
// Copyright 2019-2025 Parity Technologies (UK) Ltd.
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
// see LICENSE for license details.
//! This module exposes a [`RoundRobinRpcClient`], which is useful for load balancing
//! requests across multiple RPC clients.
//!
//! # Example
//!
//! ```rust,no_run
//! # async fn foo() -> Result<(), Box<dyn std::error::Error>> {
//! use subxt_rpcs::client::{RpcClient, RoundRobinRpcClient, jsonrpsee_client};
//!
//! // Construct some RpcClients (we'll make some jsonrpsee clients here, but
//! // you could use anything which implements `RpcClientT`).
//! let client1 = jsonrpsee_client("ws://localhost:8080").await.unwrap();
//! let client2 = jsonrpsee_client("ws://localhost:8081").await.unwrap();
//! let client3 = jsonrpsee_client("ws://localhost:8082").await.unwrap();
//!
//! let round_robin_client = RoundRobinRpcClient::new(vec![client1, client2, client3]);
//!
//! // Build an RPC Client that can be used in Subxt or in conjunction with
//! // the RPC methods provided in this crate.
//! let rpc_client = RpcClient::new(round_robin_client);
//! # Ok(())
//! # }
//! ```
use super::{RawRpcFuture, RawRpcSubscription, RpcClientT};
use std::sync::{
Arc,
atomic::{AtomicUsize, Ordering},
};
/// A simple RPC client which is provided a set of clients on initialization and
/// will round-robin through them for each request.
///
/// Cloning is cheap: clones share the same client list and round-robin
/// counter through an [`Arc`], so they continue the same rotation.
#[derive(Clone, Debug)]
pub struct RoundRobinRpcClient<Client> {
    // Shared state; see `RoundRobinRpcClientInner`.
    inner: Arc<RoundRobinRpcClientInner<Client>>,
}
// State shared between all clones of a [`RoundRobinRpcClient`].
#[derive(Debug)]
struct RoundRobinRpcClientInner<Client> {
    // The clients to rotate through; guaranteed non-empty by `new()`.
    clients: Vec<Client>,
    // Monotonically increasing counter; taken modulo `clients.len()` to pick
    // the next client. Wraps around on overflow.
    next_index: AtomicUsize,
}
impl<Client: RpcClientT> RoundRobinRpcClient<Client> {
    /// Create a new `RoundRobinRpcClient` with the given clients.
    ///
    /// # Panics
    ///
    /// Panics if the `clients` vector is empty.
    pub fn new(clients: Vec<Client>) -> Self {
        assert!(!clients.is_empty(), "At least one client must be provided");
        let inner = RoundRobinRpcClientInner {
            clients,
            next_index: AtomicUsize::new(0),
        };
        Self {
            inner: Arc::new(inner),
        }
    }

    /// Pick the client to use for the next request, advancing the rotation.
    fn next_client(&self) -> &Client {
        &self.inner.clients[self.next_index()]
    }

    /// Advance the shared counter and map it onto a valid client index.
    fn next_index(&self) -> usize {
        // fetch_add wraps on overflow, so no overflow handling is needed.
        let counter = self.inner.next_index.fetch_add(1, Ordering::Relaxed);
        counter % self.inner.clients.len()
    }
}
impl<Client: RpcClientT> RpcClientT for RoundRobinRpcClient<Client> {
    // Delegate a single request to the next client in the rotation.
    fn request_raw<'a>(
        &'a self,
        method: &'a str,
        params: Option<Box<serde_json::value::RawValue>>,
    ) -> RawRpcFuture<'a, Box<serde_json::value::RawValue>> {
        let client = self.next_client();
        client.request_raw(method, params)
    }

    // Delegate a subscription to the next client in the rotation; the
    // subscription is then served by that client from here on.
    fn subscribe_raw<'a>(
        &'a self,
        sub: &'a str,
        params: Option<Box<serde_json::value::RawValue>>,
        unsub: &'a str,
    ) -> RawRpcFuture<'a, RawRpcSubscription> {
        let client = self.next_client();
        client.subscribe_raw(sub, params, unsub)
    }
}
+1 -49
View File
@@ -32,7 +32,7 @@ impl RpcClient {
///
/// Allows insecure URLs without SSL encryption, e.g. (http:// and ws:// URLs).
pub async fn from_insecure_url<U: AsRef<str>>(url: U) -> Result<Self, Error> {
let client = jsonrpsee_helpers::client(url.as_ref())
let client = super::jsonrpsee_client(url.as_ref())
.await
.map_err(|e| Error::Client(Box::new(e)))?;
Ok(Self::new(client))
@@ -242,51 +242,3 @@ impl<Res: DeserializeOwned> Stream for RpcSubscription<Res> {
Poll::Ready(res)
}
}
// helpers for a jsonrpsee specific RPC client.
#[cfg(all(feature = "jsonrpsee", feature = "native"))]
mod jsonrpsee_helpers {
    pub use jsonrpsee::{
        client_transport::ws::{self, EitherStream, Url, WsTransportClientBuilder},
        core::client::{Client, Error},
    };
    use tokio_util::compat::Compat;

    // Sender/receiver halves of the tokio-compat websocket transport.
    pub type Sender = ws::Sender<Compat<EitherStream>>;
    pub type Receiver = ws::Receiver<Compat<EitherStream>>;

    /// Build WS RPC client from URL
    pub async fn client(url: &str) -> Result<Client, Error> {
        let (sender, receiver) = ws_transport(url).await?;
        Ok(Client::builder()
            // Cap per-subscription buffering of notifications.
            .max_buffer_capacity_per_subscription(4096)
            .build_with_tokio(sender, receiver))
    }

    // Parse the URL and establish the websocket connection, mapping any
    // failure into a transport error.
    async fn ws_transport(url: &str) -> Result<(Sender, Receiver), Error> {
        let url = Url::parse(url).map_err(|e| Error::Transport(e.into()))?;
        WsTransportClientBuilder::default()
            .build(url)
            .await
            .map_err(|e| Error::Transport(e.into()))
    }
}
// helpers for a jsonrpsee specific RPC client.
#[cfg(all(feature = "jsonrpsee", feature = "web", target_arch = "wasm32"))]
mod jsonrpsee_helpers {
    pub use jsonrpsee::{
        client_transport::web,
        core::client::{Client, ClientBuilder, Error},
    };

    /// Build web RPC client from URL
    pub async fn client(url: &str) -> Result<Client, Error> {
        // Connect the wasm websocket transport, mapping failures into a
        // transport error.
        let (sender, receiver) = web::connect(url)
            .await
            .map_err(|e| Error::Transport(e.into()))?;
        Ok(ClientBuilder::default()
            // Cap per-subscription buffering of notifications.
            .max_buffer_capacity_per_subscription(4096)
            .build_with_wasm(sender, receiver))
    }
}
+1 -1
View File
@@ -15,7 +15,7 @@ description = "subxt utility to strip metadata"
[dependencies]
codec = { workspace = true }
frame-metadata = { workspace = true, features = ["std"] }
frame-metadata = { workspace = true, features = ["std", "current"] }
scale-info = { workspace = true, features = ["std"] }
either = { workspace = true }