Add subxt-historic crate for accessing historic (non head-of-chain) blocks (#2040)

* WIP subxt-historic

* WIP subxt-historic

* WIP subxt-historic; flesh out basic foundations

* WIP filling in extrinsic decoding functionality

* iter and decode transaction extensions

* Fill in the Online/OfflineClient APIs and move more things to be part of the chain Config

* WIP storage

* clippy, fmt, finish extrinsics example

* prep for 0.0.1 release to claim crate name

* fix README link

* fmt

* WIP thinking about storage APIs

* WIP working out storage APIs

* Storage plain value fetching first pass

* WIP storage: first pass iterating over values done

* First pass finishing storage APIs

* fmt and clippy

* Create a storage example showing fetch and iteration

* Bump to frame-decode 0.9.0

* Bump subxt-historic to 0.0.3 for preview release

* Remove unused deps

* fix import

* clippy

* doc fixes

* tweak CI and fix some cargo hack findings

* Update README: subxt-historic is prerelease
This commit is contained in:
James Wilson
2025-08-26 17:56:21 +01:00
committed by GitHub
parent 23f3ebe6b5
commit 3aabd6dc09
33 changed files with 3220 additions and 55 deletions
+47
View File
@@ -0,0 +1,47 @@
mod offline_client;
mod online_client;
use crate::config::Config;
use crate::extrinsics::ExtrinsicsClient;
use crate::storage::StorageClient;
use std::marker::PhantomData;
// We keep these traits internal, so that we can mess with them later if needed,
// and instead only the concrete types are public which wrap these trait impls.
pub(crate) use offline_client::OfflineClientAtBlockT;
pub(crate) use online_client::OnlineClientAtBlockT;
pub use offline_client::OfflineClient;
pub use online_client::OnlineClient;
/// This represents a client at a specific block number.
pub struct ClientAtBlock<Client, T> {
    // The underlying client implementation (online or offline) pinned to a block.
    client: Client,
    // Carries the chain `Config` type parameter without storing a value of it.
    marker: PhantomData<T>,
}
impl<Client, T> ClientAtBlock<Client, T> {
    /// Construct a new client at some block.
    ///
    /// Crate-internal: instances are handed out by the `at()` methods on the
    /// online/offline clients rather than constructed directly by users.
    pub(crate) fn new(client: Client) -> Self {
        Self {
            client,
            marker: PhantomData,
        }
    }
}
impl<'client, T, Client> ClientAtBlock<Client, T>
where
    T: Config + 'client,
    Client: OfflineClientAtBlockT<'client, T>,
{
    /// Work with extrinsics.
    ///
    /// The returned client borrows from `self` and so cannot outlive it.
    pub fn extrinsics(&self) -> ExtrinsicsClient<'_, Client, T> {
        ExtrinsicsClient::new(&self.client)
    }

    /// Work with storage.
    ///
    /// The returned client borrows from `self` and so cannot outlive it.
    pub fn storage(&self) -> StorageClient<'_, Client, T> {
        StorageClient::new(&self.client)
    }
}
+85
View File
@@ -0,0 +1,85 @@
use super::ClientAtBlock;
use crate::config::Config;
use crate::error::OfflineClientAtBlockError;
use frame_metadata::RuntimeMetadata;
use scale_info_legacy::TypeRegistrySet;
use std::sync::Arc;
/// A client which exposes the means to decode historic data on a chain offline.
#[derive(Clone, Debug)]
pub struct OfflineClient<T: Config> {
    /// The configuration for this client.
    ///
    /// Kept behind an `Arc` so that cloning the client is cheap and all
    /// clones share the same configuration.
    config: Arc<T>,
}
impl<T: Config> OfflineClient<T> {
    /// Create a new [`OfflineClient`] with the given configuration.
    pub fn new(config: T) -> Self {
        OfflineClient {
            config: Arc::new(config),
        }
    }

    /// Pick the block height at which to operate. This references data from the
    /// [`OfflineClient`] it's called on, and so cannot outlive it.
    ///
    /// # Errors
    ///
    /// Fails if the configuration cannot supply a spec version for the block
    /// number, or metadata for that spec version; being offline, there is no
    /// node to fall back to for either lookup.
    pub fn at<'this>(
        &'this self,
        block_number: u64,
    ) -> Result<ClientAtBlock<OfflineClientAtBlock<'this, T>, T>, OfflineClientAtBlockError> {
        let config = &self.config;
        // Everything below is resolved purely from the config; each lookup
        // failing is a hard error for the offline client.
        let spec_version = self
            .config
            .spec_version_for_block_number(block_number)
            .ok_or(OfflineClientAtBlockError::SpecVersionNotFound { block_number })?;
        let legacy_types = self.config.legacy_types_for_spec_version(spec_version);
        let metadata = self
            .config
            .metadata_for_spec_version(spec_version)
            .ok_or(OfflineClientAtBlockError::MetadataNotFound { spec_version })?;
        Ok(ClientAtBlock::new(OfflineClientAtBlock {
            config,
            legacy_types,
            metadata,
        }))
    }
}
/// This represents an offline-only client at a specific block.
///
/// Kept `#[doc(hidden)]` so the trait can be changed later without a breaking
/// release; only the concrete wrapper types are part of the public API.
#[doc(hidden)]
pub trait OfflineClientAtBlockT<'client, T: Config + 'client> {
    /// Get the configuration for this client. Note that the returned borrow
    /// lives for the full `'client` lifetime, not just this call.
    fn config(&self) -> &'client T;
    /// Get the legacy types that work at this block.
    fn legacy_types(&'_ self) -> &TypeRegistrySet<'client>;
    /// Get the metadata appropriate for this block.
    fn metadata(&self) -> &RuntimeMetadata;
}
// Dev note: this shouldn't need to be exposed unless there is some
// need to explicitly name the ClientAtBlock type. Rather keep it
// private to allow changes if possible.
#[doc(hidden)]
pub struct OfflineClientAtBlock<'client, T: Config + 'client> {
    /// The configuration for this chain.
    config: &'client T,
    /// Historic types to use at this block number.
    legacy_types: TypeRegistrySet<'client>,
    /// Metadata to use at this block number.
    metadata: Arc<RuntimeMetadata>,
}
// Straightforward accessor impl: hand out the fields collected by
// `OfflineClient::at`.
impl<'client, T: Config + 'client> OfflineClientAtBlockT<'client, T>
    for OfflineClientAtBlock<'client, T>
{
    fn config(&self) -> &'client T {
        self.config
    }
    fn legacy_types(&self) -> &TypeRegistrySet<'client> {
        &self.legacy_types
    }
    fn metadata(&self) -> &RuntimeMetadata {
        &self.metadata
    }
}
+327
View File
@@ -0,0 +1,327 @@
use super::ClientAtBlock;
use crate::client::OfflineClientAtBlockT;
use crate::config::Config;
use crate::error::OnlineClientAtBlockError;
use codec::{Compact, Decode, Encode};
use frame_metadata::{RuntimeMetadata, RuntimeMetadataPrefixed};
use scale_info_legacy::TypeRegistrySet;
use std::sync::Arc;
use subxt_rpcs::methods::chain_head::ArchiveCallResult;
use subxt_rpcs::{ChainHeadRpcMethods, RpcClient};
#[cfg(feature = "jsonrpsee")]
#[cfg_attr(docsrs, doc(cfg(feature = "jsonrpsee")))]
use crate::error::OnlineClientError;
/// A client which exposes the means to decode historic data on a chain online.
#[derive(Clone, Debug)]
pub struct OnlineClient<T: Config> {
    // Shared state; cloning the client just bumps the Arc refcount.
    inner: Arc<OnlineClientInner<T>>,
}

#[derive(Debug)]
struct OnlineClientInner<T: Config> {
    /// The configuration for this client.
    config: T,
    /// The RPC methods used to communicate with the node.
    rpc_methods: ChainHeadRpcMethods<T>,
}
// The default constructors assume Jsonrpsee.
#[cfg(feature = "jsonrpsee")]
#[cfg_attr(docsrs, doc(cfg(feature = "jsonrpsee")))]
impl<T: Config> OnlineClient<T> {
    /// Construct a new [`OnlineClient`] using default settings which
    /// point to a locally running node on `ws://127.0.0.1:9944`.
    ///
    /// **Note:** This will only work if the local node is an archive node.
    pub async fn new(config: T) -> Result<OnlineClient<T>, OnlineClientError> {
        let url = "ws://127.0.0.1:9944";
        OnlineClient::from_url(config, url).await
    }

    /// Construct a new [`OnlineClient`], providing a URL to connect to.
    ///
    /// # Errors
    ///
    /// Fails if the URL cannot be parsed, is insecure (and not localhost),
    /// or if the underlying RPC connection cannot be established.
    pub async fn from_url(
        config: T,
        url: impl AsRef<str>,
    ) -> Result<OnlineClient<T>, OnlineClientError> {
        let url_str = url.as_ref();
        let url = url::Url::parse(url_str).map_err(|_| OnlineClientError::InvalidUrl {
            url: url_str.to_string(),
        })?;
        // Reject plaintext (ws:// / http://) URLs unless they target localhost;
        // callers can use `from_insecure_url` to opt out of this check.
        if !Self::is_url_secure(&url) {
            return Err(OnlineClientError::RpcClientError(
                subxt_rpcs::Error::InsecureUrl(url_str.to_string()),
            ));
        }
        OnlineClient::from_insecure_url(config, url).await
    }

    /// Construct a new [`OnlineClient`], providing a URL to connect to.
    ///
    /// Allows insecure URLs without SSL encryption, e.g. (http:// and ws:// URLs).
    pub async fn from_insecure_url(
        config: T,
        url: impl AsRef<str>,
    ) -> Result<OnlineClient<T>, OnlineClientError> {
        let rpc_client = RpcClient::from_insecure_url(url).await?;
        Ok(OnlineClient::from_rpc_client(config, rpc_client))
    }

    // A URL counts as "secure" if it uses TLS (https/wss), or if it targets
    // localhost (by name or loopback IP), where plaintext is acceptable.
    fn is_url_secure(url: &url::Url) -> bool {
        let secure_scheme = url.scheme() == "https" || url.scheme() == "wss";
        let is_localhost = url.host().is_some_and(|e| match e {
            url::Host::Domain(e) => e == "localhost",
            url::Host::Ipv4(e) => e.is_loopback(),
            url::Host::Ipv6(e) => e.is_loopback(),
        });
        secure_scheme || is_localhost
    }
}
impl<T: Config> OnlineClient<T> {
    /// Construct a new [`OnlineClient`] by providing an [`RpcClient`] to drive the connection,
    /// and some configuration for the chain we're connecting to.
    pub fn from_rpc_client(config: T, rpc_client: impl Into<RpcClient>) -> OnlineClient<T> {
        let rpc_client = rpc_client.into();
        let rpc_methods = ChainHeadRpcMethods::new(rpc_client);
        OnlineClient {
            inner: Arc::new(OnlineClientInner {
                config,
                rpc_methods,
            }),
        }
    }

    /// Pick the block height at which to operate. This references data from the
    /// [`OnlineClient`] it's called on, and so cannot outlive it.
    ///
    /// # Errors
    ///
    /// Fails if the block hash, spec version or metadata for the requested
    /// block cannot be obtained (from the config or, failing that, the node).
    pub async fn at(
        &'_ self,
        block_number: u64,
    ) -> Result<ClientAtBlock<OnlineClientAtBlock<'_, T>, T>, OnlineClientAtBlockError> {
        let config = &self.inner.config;
        let rpc_methods = &self.inner.rpc_methods;
        // NOTE(review): the archive RPC may report multiple hashes for one
        // height (e.g. competing forks); `.pop()` takes the last entry —
        // confirm that picking the last is the intended choice.
        let block_hash = rpc_methods
            .archive_v1_hash_by_height(block_number as usize)
            .await
            .map_err(|e| OnlineClientAtBlockError::CannotGetBlockHash {
                block_number,
                reason: e,
            })?
            .pop()
            .ok_or_else(|| OnlineClientAtBlockError::BlockNotFound { block_number })?
            .into();
        // Get our configuration, or fetch from the node if not available.
        let spec_version =
            if let Some(spec_version) = config.spec_version_for_block_number(block_number) {
                spec_version
            } else {
                // Fetch spec version. Caching this doesn't really make sense, so either
                // details are provided offline or we fetch them every time.
                get_spec_version(rpc_methods, block_hash).await?
            };
        let metadata = if let Some(metadata) = config.metadata_for_spec_version(spec_version) {
            metadata
        } else {
            // Fetch and then give our config the opportunity to cache this metadata.
            let metadata = get_metadata(rpc_methods, block_hash).await?;
            let metadata = Arc::new(metadata);
            config.set_metadata_for_spec_version(spec_version, metadata.clone());
            metadata
        };
        let historic_types = config.legacy_types_for_spec_version(spec_version);
        Ok(ClientAtBlock::new(OnlineClientAtBlock {
            config,
            historic_types,
            metadata,
            rpc_methods,
            block_hash,
        }))
    }
}
/// This represents an online client at a specific block.
///
/// Extends the offline trait with the node connection details needed for
/// online-only operations. Kept `#[doc(hidden)]` so it can change freely.
#[doc(hidden)]
pub trait OnlineClientAtBlockT<'client, T: Config + 'client>:
    OfflineClientAtBlockT<'client, T>
{
    /// Return the RPC methods we'll use to interact with the node.
    fn rpc_methods(&self) -> &ChainHeadRpcMethods<T>;
    /// Return the block hash for the current block.
    fn block_hash(&self) -> <T as Config>::Hash;
}
// Dev note: this shouldn't need to be exposed unless there is some
// need to explicitly name the ClientAtBlock type. Rather keep it
// private to allow changes if possible.
#[doc(hidden)]
pub struct OnlineClientAtBlock<'client, T: Config + 'client> {
    /// The configuration for this chain.
    config: &'client T,
    /// Historic types to use at this block number.
    historic_types: TypeRegistrySet<'client>,
    /// Metadata to use at this block number.
    metadata: Arc<RuntimeMetadata>,
    /// We also need RPC methods for online interactions.
    rpc_methods: &'client ChainHeadRpcMethods<T>,
    /// The block hash at which this client is operating.
    block_hash: <T as Config>::Hash,
}
// Accessors for the online-only parts: the node connection and the hash of
// the block this client is pinned to.
impl<'client, T: Config + 'client> OnlineClientAtBlockT<'client, T>
    for OnlineClientAtBlock<'client, T>
{
    fn rpc_methods(&self) -> &ChainHeadRpcMethods<T> {
        self.rpc_methods
    }
    fn block_hash(&self) -> <T as Config>::Hash {
        self.block_hash
    }
}
// The online client can do everything the offline one can, so it also
// implements the offline trait over the same fields.
impl<'client, T: Config + 'client> OfflineClientAtBlockT<'client, T>
    for OnlineClientAtBlock<'client, T>
{
    fn config(&self) -> &'client T {
        self.config
    }
    fn legacy_types(&'_ self) -> &TypeRegistrySet<'client> {
        &self.historic_types
    }
    fn metadata(&self) -> &RuntimeMetadata {
        &self.metadata
    }
}
async fn get_spec_version<T: Config>(
rpc_methods: &ChainHeadRpcMethods<T>,
block_hash: <T as Config>::Hash,
) -> Result<u32, OnlineClientAtBlockError> {
use codec::Decode;
use subxt_rpcs::methods::chain_head::ArchiveCallResult;
// make a runtime call to get the version information. This is also a constant
// in the metadata and so we could fetch it from there to avoid the call, but it would be a
// bit more effort.
let spec_version_bytes = {
let call_res = rpc_methods
.archive_v1_call(block_hash.into(), "Core_version", &[])
.await
.map_err(|e| OnlineClientAtBlockError::CannotGetSpecVersion {
block_hash: block_hash.to_string(),
reason: format!("Error calling Core_version: {e}"),
})?;
match call_res {
ArchiveCallResult::Success(bytes) => bytes.0,
ArchiveCallResult::Error(e) => {
return Err(OnlineClientAtBlockError::CannotGetSpecVersion {
block_hash: block_hash.to_string(),
reason: format!("Core_version returned an error: {e}"),
});
}
}
};
// We only care about the spec version, so just decode enough of this version information
// to be able to pluck out what we want, and ignore the rest.
let spec_version = {
#[derive(codec::Decode)]
struct SpecVersionHeader {
_spec_name: String,
_impl_name: String,
_authoring_version: u32,
spec_version: u32,
}
SpecVersionHeader::decode(&mut &spec_version_bytes[..])
.map_err(|e| OnlineClientAtBlockError::CannotGetSpecVersion {
block_hash: block_hash.to_string(),
reason: format!("Error decoding Core_version response: {e}"),
})?
.spec_version
};
Ok(spec_version)
}
/// Fetch the runtime metadata at the given block.
///
/// Tries the modern `Metadata_metadata_versions` / `Metadata_metadata_at_version`
/// runtime APIs first (picking the newest stable version), and falls back to the
/// legacy `Metadata_metadata` call if those aren't available.
async fn get_metadata<T: Config>(
    rpc_methods: &ChainHeadRpcMethods<T>,
    block_hash: <T as Config>::Hash,
) -> Result<RuntimeMetadata, OnlineClientAtBlockError> {
    // First, try to use the "modern" metadata APIs to get the most recent version we can.
    // Any failure here (call error, runtime error, decode error) just means we
    // fall back to the legacy API below.
    let version_to_get = rpc_methods
        .archive_v1_call(block_hash.into(), "Metadata_metadata_versions", &[])
        .await
        .ok()
        .and_then(|res| res.as_success())
        .and_then(|res| <Vec<u32>>::decode(&mut &res[..]).ok())
        .and_then(|versions| {
            // We want to filter out the "unstable" version, which is represented by u32::MAX.
            versions.into_iter().filter(|v| *v != u32::MAX).max()
        });
    // We had success calling the above API, so we expect the "modern" metadata API to work.
    if let Some(version_to_get) = version_to_get {
        let version_bytes = version_to_get.encode();
        let rpc_response = rpc_methods
            .archive_v1_call(
                block_hash.into(),
                "Metadata_metadata_at_version",
                &version_bytes,
            )
            .await
            .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata {
                block_hash: block_hash.to_string(),
                reason: format!("Error calling Metadata_metadata_at_version: {e}"),
            })
            .and_then(|res| match res {
                ArchiveCallResult::Success(bytes) => Ok(bytes.0),
                ArchiveCallResult::Error(e) => Err(OnlineClientAtBlockError::CannotGetMetadata {
                    block_hash: block_hash.to_string(),
                    reason: format!("Calling Metadata_metadata_at_version returned an error: {e}"),
                }),
            })?;
        // Option because we may have asked for a version that doesn't exist. Compact because we get back a Vec<u8>
        // of the metadata bytes, and the Vec is preceded by its compact encoded length. The actual bytes are then
        // decoded as a `RuntimeMetadataPrefixed`, after this.
        let (_, metadata) = <Option<(Compact<u32>, RuntimeMetadataPrefixed)>>::decode(&mut &rpc_response[..])
            .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata {
                block_hash: block_hash.to_string(),
                reason: format!("Error decoding response for Metadata_metadata_at_version: {e}"),
            })?
            .ok_or_else(|| OnlineClientAtBlockError::CannotGetMetadata {
                block_hash: block_hash.to_string(),
                reason: format!("No metadata returned for the latest version from Metadata_metadata_versions ({version_to_get})"),
            })?;
        // `.1` drops the `RuntimeMetadataPrefixed` magic-number prefix.
        return Ok(metadata.1);
    }
    // We didn't get a version from Metadata_metadata_versions, so fall back to the "old" API.
    let metadata_bytes = rpc_methods
        .archive_v1_call(block_hash.into(), "Metadata_metadata", &[])
        .await
        .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata {
            block_hash: block_hash.to_string(),
            reason: format!("Error calling Metadata_metadata: {e}"),
        })
        .and_then(|res| match res {
            ArchiveCallResult::Success(bytes) => Ok(bytes.0),
            ArchiveCallResult::Error(e) => Err(OnlineClientAtBlockError::CannotGetMetadata {
                block_hash: block_hash.to_string(),
                reason: format!("Calling Metadata_metadata returned an error: {e}"),
            }),
        })?;
    // Same compact-length-prefixed encoding as above, but never `None` here.
    let (_, metadata) = <(Compact<u32>, RuntimeMetadataPrefixed)>::decode(&mut &metadata_bytes[..])
        .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata {
            block_hash: block_hash.to_string(),
            reason: format!("Error decoding response for Metadata_metadata: {e}"),
        })?;
    Ok(metadata.1)
}
+56
View File
@@ -0,0 +1,56 @@
pub mod polkadot;
pub mod substrate;
use scale_info_legacy::TypeRegistrySet;
use std::fmt::Display;
use std::sync::Arc;
use subxt_rpcs::RpcConfig;
pub use polkadot::PolkadotConfig;
pub use substrate::SubstrateConfig;
/// This represents the configuration needed for a specific chain. This includes
/// any hardcoded types we need to know about for that chain, as well as a means to
/// obtain historic types for that chain.
pub trait Config: RpcConfig {
    /// The type of hashing used by the runtime.
    ///
    /// Must convert to/from the hash type used by the RPC layer.
    type Hash: Clone
        + Copy
        + Display
        + Into<<Self as RpcConfig>::Hash>
        + From<<Self as RpcConfig>::Hash>;
    /// Return the spec version for a given block number, if available.
    ///
    /// The [`crate::client::OnlineClient`] will look this up on chain if it's not available here,
    /// but the [`crate::client::OfflineClient`] will error if this is not available for the required block number.
    fn spec_version_for_block_number(&self, block_number: u64) -> Option<u32>;
    /// Return the metadata for a given spec version, if available.
    ///
    /// The [`crate::client::OnlineClient`] will look this up on chain if it's not available here, and then
    /// call [`Config::set_metadata_for_spec_version`] to give the configuration the opportunity to cache it.
    /// The [`crate::client::OfflineClient`] will error if this is not available for the required spec version.
    fn metadata_for_spec_version(
        &self,
        spec_version: u32,
    ) -> Option<Arc<frame_metadata::RuntimeMetadata>>;
    /// Set some metadata for a given spec version. The [`crate::client::OnlineClient`] will call this if it has
    /// to retrieve metadata from the chain, to give this the opportunity to cache it. The configuration can
    /// do nothing if it prefers.
    fn set_metadata_for_spec_version(
        &self,
        spec_version: u32,
        metadata: Arc<frame_metadata::RuntimeMetadata>,
    );
    /// Return legacy types (ie types to use with Runtimes that return pre-V14 metadata) for a given spec version.
    fn legacy_types_for_spec_version<'this>(
        &'this self,
        spec_version: u32,
    ) -> TypeRegistrySet<'this>;
    /// Hash some bytes, for instance a block header or extrinsic, for this chain.
    fn hash(s: &[u8]) -> <Self as Config>::Hash;
}
+88
View File
@@ -0,0 +1,88 @@
use super::Config;
use super::SubstrateConfig;
use scale_info_legacy::{ChainTypeRegistry, TypeRegistrySet};
use std::sync::Arc;
/// Configuration that's suitable for the Polkadot Relay Chain
pub struct PolkadotConfig(SubstrateConfig);

impl PolkadotConfig {
    /// Create a new PolkadotConfig, preloaded with the Polkadot Relay Chain
    /// legacy (pre-V14 metadata) type definitions.
    pub fn new() -> Self {
        let config = SubstrateConfig::new()
            .set_legacy_types(frame_decode::legacy_types::polkadot::relay_chain());
        // TODO: Set spec versions as well with known spec version changes, to speed
        // up accessing historic blocks within the known ranges. For now, we just let
        // the online client look these up on chain.
        Self(config)
    }

    /// Set the metadata to be used for decoding blocks at the given spec versions.
    pub fn set_metadata_for_spec_versions(
        self,
        ranges: impl Iterator<Item = (u32, frame_metadata::RuntimeMetadata)>,
    ) -> Self {
        // Delegate to the inner SubstrateConfig and re-wrap; no `mut self`
        // + reassignment needed.
        Self(self.0.set_metadata_for_spec_versions(ranges))
    }

    /// Given an iterator of block ranges to spec version of the form `(start, end, spec_version)`, add them
    /// to this configuration.
    pub fn set_spec_version_for_block_ranges(
        self,
        ranges: impl Iterator<Item = (u64, u64, u32)>,
    ) -> Self {
        Self(self.0.set_spec_version_for_block_ranges(ranges))
    }
}
/// This hands back the legacy types for the Polkadot Relay Chain, which is what [`PolkadotConfig`] uses internally.
///
/// Useful if you want to extend or inspect them before building a custom config.
pub fn legacy_types() -> ChainTypeRegistry {
    frame_decode::legacy_types::polkadot::relay_chain()
}
// `Default` is just the standard constructor, so `PolkadotConfig::default()`
// also comes preloaded with the relay chain legacy types.
impl Default for PolkadotConfig {
    fn default() -> Self {
        Self::new()
    }
}
// Every `Config` method simply delegates to the wrapped `SubstrateConfig`;
// Polkadot uses the standard Substrate hash type and hashing.
impl Config for PolkadotConfig {
    type Hash = <SubstrateConfig as Config>::Hash;
    fn legacy_types_for_spec_version(&'_ self, spec_version: u32) -> TypeRegistrySet<'_> {
        self.0.legacy_types_for_spec_version(spec_version)
    }
    fn spec_version_for_block_number(&self, block_number: u64) -> Option<u32> {
        self.0.spec_version_for_block_number(block_number)
    }
    fn metadata_for_spec_version(
        &self,
        spec_version: u32,
    ) -> Option<Arc<frame_metadata::RuntimeMetadata>> {
        self.0.metadata_for_spec_version(spec_version)
    }
    fn set_metadata_for_spec_version(
        &self,
        spec_version: u32,
        metadata: Arc<frame_metadata::RuntimeMetadata>,
    ) {
        self.0.set_metadata_for_spec_version(spec_version, metadata)
    }
    fn hash(s: &[u8]) -> <Self as Config>::Hash {
        SubstrateConfig::hash(s)
    }
}
// RPC-layer types likewise mirror SubstrateConfig's choices.
impl subxt_rpcs::RpcConfig for PolkadotConfig {
    type Hash = <SubstrateConfig as subxt_rpcs::RpcConfig>::Hash;
    type Header = <SubstrateConfig as subxt_rpcs::RpcConfig>::Header;
    type AccountId = <SubstrateConfig as subxt_rpcs::RpcConfig>::AccountId;
}
+129
View File
@@ -0,0 +1,129 @@
use super::Config;
use crate::utils::RangeMap;
use primitive_types::H256;
use scale_info_legacy::{ChainTypeRegistry, TypeRegistrySet};
use std::collections::HashMap;
use std::sync::Arc;
use std::sync::Mutex;
/// Configuration that's suitable for standard Substrate chains (ie those
/// that have not customized the block hash type).
pub struct SubstrateConfig {
    // Legacy (pre-V14 metadata) type definitions, keyed by spec version internally.
    legacy_types: ChainTypeRegistry,
    // Maps block-number ranges to the spec version in force over that range.
    spec_version_for_block_number: RangeMap<u64, u32>,
    // Metadata cache per spec version; Mutex because `Config` setters take `&self`.
    metadata_for_spec_version: Mutex<HashMap<u32, Arc<frame_metadata::RuntimeMetadata>>>,
}
impl SubstrateConfig {
    /// Create a new SubstrateConfig with no legacy types.
    ///
    /// Without any further configuration, this will only work with
    /// the [`crate::client::OnlineClient`] for blocks that were produced by Runtimes
    /// that emit metadata V14 or later.
    ///
    /// To support working at any block with the [`crate::client::OnlineClient`], you
    /// must call [`SubstrateConfig::set_legacy_types`] with appropriate legacy type
    /// definitions.
    ///
    /// To support working with the [`crate::client::OfflineClient`] at any block,
    /// you must also call:
    /// - [`SubstrateConfig::set_metadata_for_spec_versions`] to set the metadata to
    ///   use at each spec version we might encounter.
    /// - [`SubstrateConfig::set_spec_version_for_block_ranges`] to set the spec version
    ///   to use for each range of blocks we might encounter.
    pub fn new() -> Self {
        Self {
            legacy_types: ChainTypeRegistry::empty(),
            spec_version_for_block_number: RangeMap::empty(),
            metadata_for_spec_version: Mutex::new(HashMap::new()),
        }
    }

    /// Set the legacy types to use for this configuration. This enables support for
    /// blocks produced by Runtimes that emit metadata older than V14.
    pub fn set_legacy_types(mut self, legacy_types: ChainTypeRegistry) -> Self {
        self.legacy_types = legacy_types;
        self
    }

    /// Set the metadata to be used for decoding blocks at the given spec versions.
    pub fn set_metadata_for_spec_versions(
        self,
        entries: impl Iterator<Item = (u32, frame_metadata::RuntimeMetadata)>,
    ) -> Self {
        // Scope the lock guard so it's released before we return self.
        {
            let mut cached = self.metadata_for_spec_version.lock().unwrap();
            cached.extend(entries.map(|(spec_version, metadata)| (spec_version, Arc::new(metadata))));
        }
        self
    }

    /// Given an iterator of block ranges to spec version of the form `(start, end, spec_version)`, add them
    /// to this configuration.
    pub fn set_spec_version_for_block_ranges(
        mut self,
        ranges: impl Iterator<Item = (u64, u64, u32)>,
    ) -> Self {
        // Fold each range into the builder, then freeze it into the final map.
        let builder = ranges.fold(RangeMap::builder(), |b, (start, end, spec_version)| {
            b.add_range(start, end, spec_version)
        });
        self.spec_version_for_block_number = builder.build();
        self
    }
}
// `Default` is the empty configuration: no legacy types, no known spec
// version ranges, empty metadata cache.
impl Default for SubstrateConfig {
    fn default() -> Self {
        Self::new()
    }
}
impl Config for SubstrateConfig {
    type Hash = H256;
    fn legacy_types_for_spec_version(&'_ self, spec_version: u32) -> TypeRegistrySet<'_> {
        // The legacy type registry keys on u64 versions; widening from u32 is lossless.
        self.legacy_types.for_spec_version(spec_version as u64)
    }
    fn spec_version_for_block_number(&self, block_number: u64) -> Option<u32> {
        self.spec_version_for_block_number
            .get(block_number)
            .copied()
    }
    fn metadata_for_spec_version(
        &self,
        spec_version: u32,
    ) -> Option<Arc<frame_metadata::RuntimeMetadata>> {
        // Cloning the Arc is cheap; the lock is held only for the lookup.
        self.metadata_for_spec_version
            .lock()
            .unwrap()
            .get(&spec_version)
            .cloned()
    }
    fn set_metadata_for_spec_version(
        &self,
        spec_version: u32,
        metadata: Arc<frame_metadata::RuntimeMetadata>,
    ) {
        self.metadata_for_spec_version
            .lock()
            .unwrap()
            .insert(spec_version, metadata);
    }
    fn hash(s: &[u8]) -> <Self as Config>::Hash {
        // Standard Substrate chains hash with blake2-256.
        sp_crypto_hashing::blake2_256(s).into()
    }
}
impl subxt_rpcs::RpcConfig for SubstrateConfig {
    type Hash = <Self as Config>::Hash;
    // We don't use these types in any of the RPC methods we call,
    // so don't bother setting them up:
    type Header = ();
    type AccountId = ();
}
+309
View File
@@ -0,0 +1,309 @@
/// Any error emitted by this crate can convert into this.
///
/// Each variant wraps one of the more specific error types below, so callers
/// can hold a single error type or keep the specific one they were handed.
// Dev Note: All errors here are transparent, because in many places
// the inner errors are returned and so need to provide enough context
// as-is, so there shouldn't be anything to add here.
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum Error {
    #[error(transparent)]
    OnlineClientError(#[from] OnlineClientError),
    #[error(transparent)]
    OfflineClientAtBlockError(#[from] OfflineClientAtBlockError),
    #[error(transparent)]
    OnlineClientAtBlockError(#[from] OnlineClientAtBlockError),
    #[error(transparent)]
    ExtrinsicsError(#[from] ExtrinsicsError),
    #[error(transparent)]
    ExtrinsicTransactionExtensionError(#[from] ExtrinsicTransactionExtensionError),
    #[error(transparent)]
    ExtrinsicCallError(#[from] ExtrinsicCallError),
    #[error(transparent)]
    StorageError(#[from] StorageError),
    #[error(transparent)]
    StorageEntryIsNotAMap(#[from] StorageEntryIsNotAMap),
    #[error(transparent)]
    StorageEntryIsNotAPlainValue(#[from] StorageEntryIsNotAPlainValue),
    #[error(transparent)]
    StorageKeyError(#[from] StorageKeyError),
    #[error(transparent)]
    StorageValueError(#[from] StorageValueError),
}
/// Errors constructing an online client.
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum OnlineClientError {
    #[error("Cannot construct OnlineClient: The URL provided is invalid: {url}")]
    InvalidUrl {
        /// The URL that was invalid.
        url: String,
    },
    #[error("Cannot construct OnlineClient owing to an RPC client error: {0}")]
    RpcClientError(#[from] subxt_rpcs::Error),
}
/// Errors constructing an offline client at a specific block number.
///
/// These arise when the offline configuration lacks information that the
/// online client would otherwise fetch from a node.
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum OfflineClientAtBlockError {
    #[error(
        "Cannot construct OfflineClientAtBlock: spec version not found for block number {block_number}"
    )]
    SpecVersionNotFound {
        /// The block number for which the spec version was not found.
        block_number: u64,
    },
    #[error(
        "Cannot construct OfflineClientAtBlock: metadata not found for spec version {spec_version}"
    )]
    MetadataNotFound {
        /// The spec version for which the metadata was not found.
        spec_version: u32,
    },
}
/// Errors constructing an online client at a specific block number.
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum OnlineClientAtBlockError {
    #[error(
        "Cannot construct OnlineClientAtBlock: failed to get block hash from node for block {block_number}: {reason}"
    )]
    CannotGetBlockHash {
        /// Block number we failed to get the hash for.
        block_number: u64,
        /// The error we encountered.
        reason: subxt_rpcs::Error,
    },
    #[error("Cannot construct OnlineClientAtBlock: block number {block_number} not found")]
    BlockNotFound {
        /// The block number for which a block was not found.
        block_number: u64,
    },
    #[error(
        "Cannot construct OnlineClientAtBlock: failed to get spec version for block hash {block_hash}: {reason}"
    )]
    CannotGetSpecVersion {
        /// The block hash for which we failed to get the spec version.
        block_hash: String,
        /// The error we encountered.
        reason: String,
    },
    #[error(
        "Cannot construct OnlineClientAtBlock: failed to get metadata for block hash {block_hash}: {reason}"
    )]
    CannotGetMetadata {
        /// The block hash for which we failed to get the metadata.
        block_hash: String,
        /// The error we encountered.
        reason: String,
    },
}
/// Errors working with extrinsics.
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum ExtrinsicsError {
    #[error("Could not fetch extrinsics: {reason}")]
    FetchError {
        /// The error that occurred while fetching the extrinsics.
        reason: subxt_rpcs::Error,
    },
    #[error("Could not decode extrinsic at index {index}: {reason}")]
    DecodeError {
        /// The extrinsic index that failed to decode.
        index: usize,
        /// The error that occurred during decoding.
        reason: frame_decode::extrinsics::ExtrinsicDecodeError,
    },
    #[error(
        "Could not decode extrinsic at index {index}: there were undecoded bytes at the end, which implies that we did not decode it properly"
    )]
    LeftoverBytes {
        /// The extrinsic index that had leftover bytes.
        index: usize,
        /// The bytes that were left over after decoding the extrinsic.
        leftover_bytes: Vec<u8>,
    },
    #[error("Could not decode extrinsics: Unsupported metadata version ({version})")]
    UnsupportedMetadataVersion {
        /// The metadata version that is not supported.
        version: u32,
    },
}
/// Errors decoding the transaction extensions of an extrinsic.
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum ExtrinsicTransactionExtensionError {
    #[error("Could not decode extrinsic transaction extensions: {reason}")]
    AllDecodeError {
        /// The error that occurred while decoding the transaction extensions.
        reason: scale_decode::Error,
    },
    #[error(
        "Could not decode extrinsic transaction extensions: there were undecoded bytes at the end, which implies that we did not decode it properly"
    )]
    AllLeftoverBytes {
        /// The bytes that were left over after decoding the transaction extensions.
        leftover_bytes: Vec<u8>,
    },
    #[error("Could not decode extrinsic transaction extension {name}: {reason}")]
    DecodeError {
        /// The name of the transaction extension that failed to decode.
        name: String,
        /// The error that occurred during decoding.
        reason: scale_decode::Error,
    },
    #[error(
        "Could not decode extrinsic transaction extension {name}: there were undecoded bytes at the end, which implies that we did not decode it properly"
    )]
    LeftoverBytes {
        /// The name of the transaction extension that had leftover bytes.
        name: String,
        /// The bytes that were left over after decoding the transaction extension.
        leftover_bytes: Vec<u8>,
    },
}
/// Errors decoding the call data of an extrinsic.
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum ExtrinsicCallError {
    #[error("Could not decode the fields in extrinsic call: {reason}")]
    FieldsDecodeError {
        /// The error that occurred while decoding the fields of the extrinsic call.
        reason: scale_decode::Error,
    },
    #[error(
        "Could not decode the fields in extrinsic call: there were undecoded bytes at the end, which implies that we did not decode it properly"
    )]
    FieldsLeftoverBytes {
        /// The bytes that were left over after decoding the extrinsic call.
        leftover_bytes: Vec<u8>,
    },
    #[error("Could not decode field {name} in extrinsic call: {reason}")]
    FieldDecodeError {
        /// The name of the field that failed to decode.
        name: String,
        /// The error that occurred during decoding.
        reason: scale_decode::Error,
    },
    #[error(
        "Could not decode field {name} in extrinsic call: there were undecoded bytes at the end, which implies that we did not decode it properly"
    )]
    FieldLeftoverBytes {
        /// The name of the field that had leftover bytes.
        name: String,
        /// The bytes that were left over after decoding the extrinsic call.
        leftover_bytes: Vec<u8>,
    },
}
/// Error returned when a map-style storage API is used on an entry that is
/// actually a plain value.
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[error("Storage entry is not a map: pallet {pallet_name}, storage {storage_name}")]
pub struct StorageEntryIsNotAMap {
    /// The pallet containing the storage entry that is not a map.
    pub pallet_name: String,
    /// The name of the storage entry that is not a map.
    pub storage_name: String,
}
/// Error returned when a plain-value storage API is used on an entry that is
/// actually a map.
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[error("Storage entry is not a plain value: pallet {pallet_name}, storage {storage_name}")]
pub struct StorageEntryIsNotAPlainValue {
    /// The pallet containing the storage entry that is not a plain value.
    pub pallet_name: String,
    /// The name of the storage entry that is not a plain value.
    pub storage_name: String,
}
/// Errors interacting with the storage APIs.
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum StorageError {
    #[error("RPC error interacting with storage APIs: {reason}")]
    RpcError {
        /// The error that occurred while fetching the storage entry.
        reason: subxt_rpcs::Error,
    },
    #[error("Could not fetch next entry from storage subscription: {reason}")]
    StorageEventError {
        /// The error that occurred while fetching the next storage entry.
        reason: String,
    },
    #[error("Could not construct storage key: {reason}")]
    KeyEncodeError {
        /// The error that occurred while constructing the storage key.
        reason: frame_decode::storage::StorageKeyEncodeError,
    },
    #[error(
        "Too many keys provided: expected {num_keys_expected} keys, but got {num_keys_provided}"
    )]
    WrongNumberOfKeysProvided {
        /// The number of keys that were provided.
        num_keys_provided: usize,
        /// The number of keys expected.
        num_keys_expected: usize,
    },
    #[error(
        "Could not extract storage information from metadata: Unsupported metadata version ({version})"
    )]
    UnsupportedMetadataVersion {
        /// The metadata version that is not supported.
        version: u32,
    },
    #[error("Could not extract storage information from metadata: {reason}")]
    ExtractStorageInfoError {
        /// The error that occurred while extracting storage information from the metadata.
        reason: frame_decode::storage::StorageInfoError<'static>,
    },
}
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum StorageKeyError {
    #[error("Could not decode the storage key: {reason}")]
    DecodeError {
        /// The error that occurred while decoding the storage key information.
        reason: frame_decode::storage::StorageKeyDecodeError<String>,
    },
    #[error(
        "Could not decode the storage key: there were undecoded bytes at the end, which implies that we did not decode it properly"
    )]
    LeftoverBytes {
        /// The bytes that were left over after decoding the storage key.
        leftover_bytes: Vec<u8>,
    },
    #[error("Could not decode the part of the storage key at index {index}: {reason}")]
    DecodePartError {
        /// The index of the storage key part that failed to decode.
        index: usize,
        /// The error that occurred while decoding that part of the key.
        reason: scale_decode::Error,
    },
}
#[allow(missing_docs)]
#[derive(Debug, thiserror::Error)]
#[non_exhaustive]
pub enum StorageValueError {
    #[error("Could not decode storage value: {reason}")]
    DecodeError {
        /// The error that occurred while decoding the storage value.
        reason: scale_decode::Error,
    },
    #[error(
        "Could not decode storage value: there were undecoded bytes at the end, which implies that we did not decode it properly"
    )]
    LeftoverBytes {
        /// The bytes that were left over after decoding the storage value.
        leftover_bytes: Vec<u8>,
    },
}
+76
View File
@@ -0,0 +1,76 @@
use crate::client::{OfflineClientAtBlockT, OnlineClientAtBlockT};
use crate::config::Config;
use crate::error::ExtrinsicsError;
mod extrinsic_call;
mod extrinsic_info;
mod extrinsic_transaction_extensions;
mod extrinsics_type;
pub use extrinsic_transaction_extensions::ExtrinsicTransactionExtensions;
pub use extrinsics_type::{Extrinsic, Extrinsics};
/// Work with extrinsics.
pub struct ExtrinsicsClient<'atblock, Client, T> {
    // The client pinned at a specific block; gives access to metadata and
    // (for online clients) the RPC methods used to fetch extrinsic bytes.
    client: &'atblock Client,
    marker: std::marker::PhantomData<T>,
}
impl<'atblock, Client, T> ExtrinsicsClient<'atblock, Client, T> {
    /// Construct a new [`ExtrinsicsClient`] from a client at some block.
    pub(crate) fn new(client: &'atblock Client) -> Self {
        Self {
            client,
            marker: std::marker::PhantomData,
        }
    }
}
// Things that we can do online with extrinsics.
impl<'atblock, 'client: 'atblock, Client, T> ExtrinsicsClient<'atblock, Client, T>
where
    T: Config + 'client,
    Client: OnlineClientAtBlockT<'client, T>,
{
    /// Fetch the extrinsics for the current block. This is essentially a
    /// combination of [`Self::fetch_bytes`] and [`Self::decode_from`].
    pub async fn fetch(&self) -> Result<Extrinsics<'atblock>, ExtrinsicsError> {
        let bytes = self.fetch_bytes().await?;
        if bytes.is_empty() {
            // Small optimization; nothing to decode, so skip straight to an empty set.
            Ok(Extrinsics::empty())
        } else {
            self.decode_from(bytes)
        }
    }

    /// Fetch the bytes for the extrinsics in the current block.
    pub async fn fetch_bytes(&self) -> Result<Vec<Vec<u8>>, ExtrinsicsError> {
        let body = self
            .client
            .rpc_methods()
            .archive_v1_body(self.client.block_hash().into())
            .await
            .map_err(|e| ExtrinsicsError::FetchError { reason: e })?;

        // `None` from the RPC call is treated the same as an empty body.
        let bytes: Vec<Vec<u8>> = match body {
            Some(extrinsics) => extrinsics.into_iter().map(|b| b.0).collect(),
            None => Vec::new(),
        };
        Ok(bytes)
    }
}
// Things that we can do offline with extrinsics.
impl<'atblock, 'client: 'atblock, Client, T> ExtrinsicsClient<'atblock, Client, T>
where
    T: Config + 'client,
    Client: OfflineClientAtBlockT<'client, T>,
{
    /// Given some bytes representing the extrinsics in this block, decode them into an [`Extrinsics`] type.
    ///
    /// This needs no network access; decoding uses only the metadata and legacy type
    /// information already held by the client for this block.
    pub fn decode_from(
        &self,
        bytes: Vec<Vec<u8>>,
    ) -> Result<Extrinsics<'atblock>, ExtrinsicsError> {
        Extrinsics::new(bytes, self.client)
    }
}
+178
View File
@@ -0,0 +1,178 @@
use super::extrinsic_info::{AnyExtrinsicInfo, with_info};
use crate::error::ExtrinsicCallError;
use crate::utils::Either;
use scale_info_legacy::{LookupName, TypeRegistrySet};
/// This represents the call data in the extrinsic.
pub struct ExtrinsicCall<'extrinsics, 'atblock> {
    // The bytes for the *whole* extrinsic; the ranges stored in `info`
    // index into this slice to locate the call data.
    all_bytes: &'extrinsics [u8],
    info: &'extrinsics AnyExtrinsicInfo<'atblock>,
}
impl<'extrinsics, 'atblock> ExtrinsicCall<'extrinsics, 'atblock> {
    // Construct from the full extrinsic bytes and its decoded shape information.
    pub(crate) fn new(
        all_bytes: &'extrinsics [u8],
        info: &'extrinsics AnyExtrinsicInfo<'atblock>,
    ) -> Self {
        Self { all_bytes, info }
    }
    /// The index of the pallet that this call is for.
    pub fn pallet_index(&self) -> u8 {
        with_info!(&self.info => info.info.pallet_index())
    }
    /// The name of the pallet that this call is for.
    pub fn pallet_name(&self) -> &str {
        with_info!(&self.info => info.info.pallet_name())
    }
    /// The index of this call.
    pub fn index(&self) -> u8 {
        with_info!(&self.info => info.info.call_index())
    }
    /// The name of this call.
    pub fn name(&self) -> &str {
        with_info!(&self.info => info.info.call_name())
    }
    /// Get the raw bytes for the entire call, which includes the pallet and call index
    /// bytes as well as the encoded arguments for each of the fields.
    pub fn bytes(&self) -> &'extrinsics [u8] {
        with_info!(&self.info => &self.all_bytes[info.info.call_data_range()])
    }
    /// Work with the fields in this call.
    pub fn fields(&self) -> ExtrinsicCallFields<'extrinsics, 'atblock> {
        ExtrinsicCallFields::new(self.all_bytes, self.info)
    }
}
/// This represents the fields of the call.
pub struct ExtrinsicCallFields<'extrinsics, 'atblock> {
    // The bytes for the *whole* extrinsic; ranges in `info` index into this.
    all_bytes: &'extrinsics [u8],
    info: &'extrinsics AnyExtrinsicInfo<'atblock>,
}
impl<'extrinsics, 'atblock> ExtrinsicCallFields<'extrinsics, 'atblock> {
    // Construct from the full extrinsic bytes and its decoded shape information.
    pub(crate) fn new(
        all_bytes: &'extrinsics [u8],
        info: &'extrinsics AnyExtrinsicInfo<'atblock>,
    ) -> Self {
        Self { all_bytes, info }
    }
    /// Return the bytes representing the fields stored in this extrinsic.
    ///
    /// # Note
    ///
    /// This is a subset of [`ExtrinsicCall::bytes`] that does not include the
    /// first two bytes that denote the pallet index and the variant index.
    pub fn bytes(&self) -> &'extrinsics [u8] {
        with_info!(&self.info => &self.all_bytes[info.info.call_data_args_range()])
    }
    /// Iterate over each of the fields of the extrinsic call data.
    pub fn iter(&self) -> impl Iterator<Item = ExtrinsicCallField<'extrinsics, 'atblock>> {
        // The two match arms produce different concrete iterator types, so we unify
        // them behind `Either`, which itself implements `Iterator`.
        match &self.info {
            AnyExtrinsicInfo::Legacy(info) => {
                Either::A(info.info.call_data().map(|named_arg| ExtrinsicCallField {
                    field_bytes: &self.all_bytes[named_arg.range()],
                    info: AnyExtrinsicCallFieldInfo::Legacy(ExtrinsicCallFieldInfo {
                        info: named_arg,
                        resolver: info.resolver,
                    }),
                }))
            }
            AnyExtrinsicInfo::Current(info) => {
                Either::B(info.info.call_data().map(|named_arg| ExtrinsicCallField {
                    field_bytes: &self.all_bytes[named_arg.range()],
                    info: AnyExtrinsicCallFieldInfo::Current(ExtrinsicCallFieldInfo {
                        info: named_arg,
                        resolver: info.resolver,
                    }),
                }))
            }
        }
    }
    /// Attempt to decode the fields into the given type.
    ///
    /// Fails if the field bytes cannot be decoded into `T`, or if any bytes
    /// are left over after decoding (which implies a decode mismatch).
    pub fn decode<T: scale_decode::DecodeAsFields>(&self) -> Result<T, ExtrinsicCallError> {
        with_info!(&self.info => {
            let cursor = &mut self.bytes();
            let mut fields = &mut info.info.call_data().map(|named_arg| {
                scale_decode::Field::new(named_arg.ty().clone(), Some(named_arg.name()))
            });
            let decoded = T::decode_as_fields(cursor, &mut fields, info.resolver)
                .map_err(|e| ExtrinsicCallError::FieldsDecodeError { reason: e })?;
            // Leftover bytes indicate that the provided type didn't consume
            // everything, i.e. it doesn't accurately describe the call.
            if !cursor.is_empty() {
                return Err(ExtrinsicCallError::FieldsLeftoverBytes {
                    leftover_bytes: cursor.to_vec(),
                })
            }
            Ok(decoded)
        })
    }
}
/// A single named field of an extrinsic call, along with the information
/// needed to decode it.
pub struct ExtrinsicCallField<'extrinsics, 'atblock> {
    // The bytes for just this field's encoded value.
    field_bytes: &'extrinsics [u8],
    info: AnyExtrinsicCallFieldInfo<'extrinsics, 'atblock>,
}
// Field information for modern or legacy extrinsics.
enum AnyExtrinsicCallFieldInfo<'extrinsics, 'atblock> {
    Legacy(ExtrinsicCallFieldInfo<'extrinsics, 'atblock, LookupName, TypeRegistrySet<'atblock>>),
    Current(ExtrinsicCallFieldInfo<'extrinsics, 'atblock, u32, scale_info::PortableRegistry>),
}
// Field information for a specific type ID and resolver type.
struct ExtrinsicCallFieldInfo<'extrinsics, 'atblock, TypeId, Resolver> {
    info: &'extrinsics frame_decode::extrinsics::NamedArg<'atblock, TypeId>,
    resolver: &'atblock Resolver,
}
// Run the same expression against either variant, binding the inner
// info to the given identifier in both arms.
macro_rules! with_call_field_info {
    (&$self:ident.$info:ident => $fn:expr) => {
        #[allow(clippy::clone_on_copy)]
        match &$self.$info {
            AnyExtrinsicCallFieldInfo::Legacy($info) => $fn,
            AnyExtrinsicCallFieldInfo::Current($info) => $fn,
        }
    };
}
impl<'extrinsics, 'atblock> ExtrinsicCallField<'extrinsics, 'atblock> {
    /// Get the raw bytes for this field.
    pub fn bytes(&self) -> &'extrinsics [u8] {
        self.field_bytes
    }
    /// Get the name of this field.
    pub fn name(&self) -> &'extrinsics str {
        with_call_field_info!(&self.info => info.info.name())
    }
    /// Attempt to decode the value of this field into the given type.
    ///
    /// Fails if decoding errors, or if any bytes are left over afterwards.
    pub fn decode<T: scale_decode::DecodeAsType>(&self) -> Result<T, ExtrinsicCallError> {
        with_call_field_info!(&self.info => {
            let cursor = &mut &*self.field_bytes;
            let decoded = T::decode_as_type(cursor, info.info.ty().clone(), info.resolver)
                .map_err(|e| ExtrinsicCallError::FieldDecodeError {
                    name: info.info.name().to_string(),
                    reason: e,
                })?;
            // Leftover bytes mean `T` didn't fully describe the field value.
            if !cursor.is_empty() {
                return Err(ExtrinsicCallError::FieldLeftoverBytes {
                    name: info.info.name().to_string(),
                    leftover_bytes: cursor.to_vec(),
                });
            }
            Ok(decoded)
        })
    }
}
+109
View File
@@ -0,0 +1,109 @@
use crate::error::ExtrinsicsError;
use frame_metadata::RuntimeMetadata;
use scale_info_legacy::{LookupName, TypeRegistrySet};
// Extrinsic information for modern or legacy extrinsics.
#[allow(clippy::large_enum_variant)]
pub enum AnyExtrinsicInfo<'atblock> {
    Legacy(ExtrinsicInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>),
    Current(ExtrinsicInfo<'atblock, u32, scale_info::PortableRegistry>),
}
impl<'atblock> AnyExtrinsicInfo<'atblock> {
    /// For a slice of extrinsics, return a vec of information about each one.
    ///
    /// Metadata V8-V13 is decoded with the provided legacy type registry;
    /// V14+ uses the type registry embedded in the metadata itself. Any
    /// other version produces `ExtrinsicsError::UnsupportedMetadataVersion`.
    pub fn new(
        bytes: &[Vec<u8>],
        metadata: &'atblock RuntimeMetadata,
        legacy_types: &'atblock TypeRegistrySet<'atblock>,
    ) -> Result<Vec<Self>, ExtrinsicsError> {
        let infos = match metadata {
            RuntimeMetadata::V8(m) => extrinsic_info_inner(bytes, m, legacy_types),
            RuntimeMetadata::V9(m) => extrinsic_info_inner(bytes, m, legacy_types),
            RuntimeMetadata::V10(m) => extrinsic_info_inner(bytes, m, legacy_types),
            RuntimeMetadata::V11(m) => extrinsic_info_inner(bytes, m, legacy_types),
            RuntimeMetadata::V12(m) => extrinsic_info_inner(bytes, m, legacy_types),
            RuntimeMetadata::V13(m) => extrinsic_info_inner(bytes, m, legacy_types),
            RuntimeMetadata::V14(m) => extrinsic_info_inner(bytes, m, &m.types),
            RuntimeMetadata::V15(m) => extrinsic_info_inner(bytes, m, &m.types),
            RuntimeMetadata::V16(m) => extrinsic_info_inner(bytes, m, &m.types),
            unknown => {
                return Err(ExtrinsicsError::UnsupportedMetadataVersion {
                    version: unknown.version(),
                });
            }
        }?;
        // Note: item definitions are visible throughout the enclosing block,
        // so this helper can be (and is) called by the `match` above it.
        // Decode every extrinsic with one concrete metadata + resolver pair,
        // wrapping each result in the appropriate AnyExtrinsicInfo variant.
        fn extrinsic_info_inner<'atblock, Info, Resolver>(
            bytes: &[Vec<u8>],
            args_info: &'atblock Info,
            type_resolver: &'atblock Resolver,
        ) -> Result<Vec<AnyExtrinsicInfo<'atblock>>, ExtrinsicsError>
        where
            Info: frame_decode::extrinsics::ExtrinsicTypeInfo,
            Info::TypeId: Clone + core::fmt::Display + core::fmt::Debug + Send + Sync + 'static,
            Resolver: scale_type_resolver::TypeResolver<TypeId = Info::TypeId>,
            AnyExtrinsicInfo<'atblock>: From<ExtrinsicInfo<'atblock, Info::TypeId, Resolver>>,
        {
            bytes
                .iter()
                .enumerate()
                .map(|(index, bytes)| {
                    let cursor = &mut &**bytes;
                    let extrinsic_info = frame_decode::extrinsics::decode_extrinsic(
                        cursor,
                        args_info,
                        type_resolver,
                    )
                    .map_err(|reason| ExtrinsicsError::DecodeError { index, reason })?;
                    // If the whole byte slice wasn't consumed, the decode is suspect.
                    if !cursor.is_empty() {
                        return Err(ExtrinsicsError::LeftoverBytes {
                            index,
                            leftover_bytes: cursor.to_vec(),
                        });
                    }
                    Ok(ExtrinsicInfo {
                        info: extrinsic_info,
                        resolver: type_resolver,
                    }
                    .into())
                })
                .collect()
        }
        Ok(infos)
    }
}
// Allow ExtrinsicInfo built from legacy types to convert into the type-erased enum.
impl<'atblock> From<ExtrinsicInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>>
    for AnyExtrinsicInfo<'atblock>
{
    fn from(info: ExtrinsicInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>) -> Self {
        AnyExtrinsicInfo::Legacy(info)
    }
}
// Allow ExtrinsicInfo built from modern (V14+) types to convert into the type-erased enum.
impl<'atblock> From<ExtrinsicInfo<'atblock, u32, scale_info::PortableRegistry>>
    for AnyExtrinsicInfo<'atblock>
{
    fn from(info: ExtrinsicInfo<'atblock, u32, scale_info::PortableRegistry>) -> Self {
        AnyExtrinsicInfo::Current(info)
    }
}
// Extrinsic information for a specific type ID and resolver type.
pub struct ExtrinsicInfo<'atblock, TypeId, Resolver> {
    pub info: frame_decode::extrinsics::Extrinsic<'atblock, TypeId>,
    pub resolver: &'atblock Resolver,
}
// Run the same expression against either variant of AnyExtrinsicInfo,
// binding the inner ExtrinsicInfo to the given identifier in both arms.
macro_rules! with_info {
    (&$self:ident.$info:ident => $fn:expr) => {
        #[allow(clippy::clone_on_copy)]
        match &$self.$info {
            AnyExtrinsicInfo::Legacy($info) => $fn,
            AnyExtrinsicInfo::Current($info) => $fn,
        }
    };
}
pub(crate) use with_info;
@@ -0,0 +1,213 @@
use super::extrinsic_info::AnyExtrinsicInfo;
use crate::error::ExtrinsicTransactionExtensionError;
use crate::utils::Either;
use frame_decode::helpers::scale_decode;
use scale_info_legacy::{LookupName, TypeRegistrySet};
// Extrinsic extensions information for modern or legacy extrinsics.
enum AnyExtrinsicExtensionsInfo<'extrinsics, 'atblock> {
    Legacy(ExtrinsicExtensionsInfo<'extrinsics, 'atblock, LookupName, TypeRegistrySet<'atblock>>),
    Current(ExtrinsicExtensionsInfo<'extrinsics, 'atblock, u32, scale_info::PortableRegistry>),
}
// Extensions information for a specific type ID and resolver type.
struct ExtrinsicExtensionsInfo<'extrinsics, 'atblock, TypeId, Resolver> {
    info: &'extrinsics frame_decode::extrinsics::ExtrinsicExtensions<'atblock, TypeId>,
    resolver: &'atblock Resolver,
}
/// This represents the transaction extensions of an extrinsic.
pub struct ExtrinsicTransactionExtensions<'extrinsics, 'atblock> {
    // The bytes for the *whole* extrinsic; ranges in `info` index into this.
    all_bytes: &'extrinsics [u8],
    info: AnyExtrinsicExtensionsInfo<'extrinsics, 'atblock>,
}
// Run the same expression against either variant, binding the inner
// info to the given identifier in both arms.
macro_rules! with_extensions_info {
    (&$self:ident.$info:ident => $fn:expr) => {
        #[allow(clippy::clone_on_copy)]
        match &$self.$info {
            AnyExtrinsicExtensionsInfo::Legacy($info) => $fn,
            AnyExtrinsicExtensionsInfo::Current($info) => $fn,
        }
    };
}
impl<'extrinsics, 'atblock> ExtrinsicTransactionExtensions<'extrinsics, 'atblock> {
    // Returns `None` if the extrinsic has no transaction extension payload.
    pub(crate) fn new(
        all_bytes: &'extrinsics [u8],
        info: &'extrinsics AnyExtrinsicInfo<'atblock>,
    ) -> Option<Self> {
        match info {
            AnyExtrinsicInfo::Current(info) => {
                let extension_info = info.info.transaction_extension_payload()?;
                Some(Self {
                    all_bytes,
                    info: AnyExtrinsicExtensionsInfo::Current(ExtrinsicExtensionsInfo {
                        info: extension_info,
                        resolver: info.resolver,
                    }),
                })
            }
            AnyExtrinsicInfo::Legacy(info) => {
                let extension_info = info.info.transaction_extension_payload()?;
                Some(Self {
                    all_bytes,
                    info: AnyExtrinsicExtensionsInfo::Legacy(ExtrinsicExtensionsInfo {
                        info: extension_info,
                        resolver: info.resolver,
                    }),
                })
            }
        }
    }
    /// Get the raw bytes for all of the transaction extensions.
    pub fn bytes(&self) -> &'extrinsics [u8] {
        with_extensions_info!(&self.info => &self.all_bytes[info.info.range()])
    }
    /// Iterate over each of the transaction extensions in this extrinsic.
    pub fn iter(
        &self,
    ) -> impl Iterator<Item = ExtrinsicTransactionExtension<'extrinsics, 'atblock>> {
        // The two match arms produce different concrete iterator types, so we
        // unify them behind `Either`, which itself implements `Iterator`.
        match &self.info {
            AnyExtrinsicExtensionsInfo::Legacy(extension_info) => {
                let iter = extension_info
                    .info
                    .iter()
                    .map(|s| ExtrinsicTransactionExtension {
                        bytes: &self.all_bytes[s.range()],
                        info: ExtrinsicExtensionInfo {
                            name: s.name(),
                            type_id: s.ty(),
                            resolver: extension_info.resolver,
                        }
                        .into(),
                    });
                Either::A(iter)
            }
            AnyExtrinsicExtensionsInfo::Current(extension_info) => {
                let iter = extension_info
                    .info
                    .iter()
                    .map(|s| ExtrinsicTransactionExtension {
                        bytes: &self.all_bytes[s.range()],
                        info: ExtrinsicExtensionInfo {
                            name: s.name(),
                            type_id: s.ty(),
                            resolver: extension_info.resolver,
                        }
                        .into(),
                    });
                Either::B(iter)
            }
        }
    }
    /// Attempt to decode the transaction extensions into a type where each field name is the name of the transaction
    /// extension and the field value is the decoded extension.
    ///
    /// Fails if decoding errors, or if any bytes are left over afterwards.
    pub fn decode<T: scale_decode::DecodeAsFields>(
        &self,
    ) -> Result<T, ExtrinsicTransactionExtensionError> {
        with_extensions_info!(&self.info => {
            let cursor = &mut self.bytes();
            let mut fields = &mut info.info.iter().map(|named_arg| {
                scale_decode::Field::new(named_arg.ty().clone(), Some(named_arg.name()))
            });
            let decoded = T::decode_as_fields(cursor, &mut fields, info.resolver)
                .map_err(|e| ExtrinsicTransactionExtensionError::AllDecodeError { reason: e })?;
            // Leftover bytes mean `T` didn't fully describe the extensions.
            if !cursor.is_empty() {
                return Err(ExtrinsicTransactionExtensionError::AllLeftoverBytes {
                    leftover_bytes: cursor.to_vec(),
                })
            }
            Ok(decoded)
        })
    }
}
// Extrinsic single extension information for modern or legacy extrinsics.
enum AnyExtrinsicExtensionInfo<'extrinsics, 'atblock> {
    Legacy(ExtrinsicExtensionInfo<'extrinsics, 'atblock, LookupName, TypeRegistrySet<'atblock>>),
    Current(ExtrinsicExtensionInfo<'extrinsics, 'atblock, u32, scale_info::PortableRegistry>),
}
// Allow legacy single-extension info to convert into the type-erased enum.
impl<'extrinsics, 'atblock>
    From<ExtrinsicExtensionInfo<'extrinsics, 'atblock, LookupName, TypeRegistrySet<'atblock>>>
    for AnyExtrinsicExtensionInfo<'extrinsics, 'atblock>
{
    fn from(
        info: ExtrinsicExtensionInfo<'extrinsics, 'atblock, LookupName, TypeRegistrySet<'atblock>>,
    ) -> Self {
        AnyExtrinsicExtensionInfo::Legacy(info)
    }
}
// Allow modern single-extension info to convert into the type-erased enum.
impl<'extrinsics, 'atblock>
    From<ExtrinsicExtensionInfo<'extrinsics, 'atblock, u32, scale_info::PortableRegistry>>
    for AnyExtrinsicExtensionInfo<'extrinsics, 'atblock>
{
    fn from(
        info: ExtrinsicExtensionInfo<'extrinsics, 'atblock, u32, scale_info::PortableRegistry>,
    ) -> Self {
        AnyExtrinsicExtensionInfo::Current(info)
    }
}
// Single-extension information for a specific type ID and resolver type.
struct ExtrinsicExtensionInfo<'extrinsics, 'atblock, TypeId, Resolver> {
    name: &'extrinsics str,
    type_id: &'extrinsics TypeId,
    resolver: &'atblock Resolver,
}
// Run the same expression against either variant, binding the inner
// info to the given identifier in both arms.
macro_rules! with_extension_info {
    (&$self:ident.$info:ident => $fn:expr) => {
        #[allow(clippy::clone_on_copy)]
        match &$self.$info {
            AnyExtrinsicExtensionInfo::Legacy($info) => $fn,
            AnyExtrinsicExtensionInfo::Current($info) => $fn,
        }
    };
}
/// This represents a single transaction extension in an extrinsic.
pub struct ExtrinsicTransactionExtension<'extrinsics, 'atblock> {
    // The bytes for just this extension's encoded value.
    bytes: &'extrinsics [u8],
    info: AnyExtrinsicExtensionInfo<'extrinsics, 'atblock>,
}
impl<'extrinsics, 'atblock> ExtrinsicTransactionExtension<'extrinsics, 'atblock> {
    /// The bytes for this transaction extension.
    pub fn bytes(&self) -> &'extrinsics [u8] {
        self.bytes
    }
    /// The name/identifier for this transaction extension.
    pub fn name(&self) -> &'extrinsics str {
        with_extension_info!(&self.info => info.name)
    }
    /// Decode the bytes for this transaction extension into a type that implements `scale_decode::DecodeAsType`.
    ///
    /// Fails if decoding errors, or if any bytes are left over afterwards.
    pub fn decode<T: scale_decode::DecodeAsType>(
        &self,
    ) -> Result<T, ExtrinsicTransactionExtensionError> {
        with_extension_info!(&self.info => {
            let cursor = &mut &*self.bytes;
            let decoded = T::decode_as_type(cursor, info.type_id.clone(), info.resolver)
                .map_err(|reason| ExtrinsicTransactionExtensionError::DecodeError {
                    name: info.name.to_string(),
                    reason
                })?;
            // Leftover bytes mean `T` didn't fully describe the extension value.
            if !cursor.is_empty() {
                return Err(ExtrinsicTransactionExtensionError::LeftoverBytes {
                    name: info.name.to_string(),
                    leftover_bytes: cursor.to_vec(),
                });
            }
            Ok(decoded)
        })
    }
}
+115
View File
@@ -0,0 +1,115 @@
use super::extrinsic_call::ExtrinsicCall;
use super::extrinsic_info::{AnyExtrinsicInfo, with_info};
use super::extrinsic_transaction_extensions::ExtrinsicTransactionExtensions;
use crate::client::OfflineClientAtBlockT;
use crate::config::Config;
use crate::error::ExtrinsicsError;
/// This represents some extrinsics in a block, and carries everything that we need to decode information out of them.
pub struct Extrinsics<'atblock> {
    bytes: Vec<Vec<u8>>,
    // Each index in this vec should line up with one index in the above vec.
    infos: Vec<AnyExtrinsicInfo<'atblock>>,
}
impl<'atblock> Extrinsics<'atblock> {
    // In here we hide the messy logic needed to decode extrinsics into a consistent output given either current or legacy metadata.
    pub(crate) fn new<'client: 'atblock, T, Client>(
        bytes: Vec<Vec<u8>>,
        client: &'atblock Client,
    ) -> Result<Self, ExtrinsicsError>
    where
        T: Config + 'client,
        Client: OfflineClientAtBlockT<'client, T>,
    {
        let infos = AnyExtrinsicInfo::new(&bytes, client.metadata(), client.legacy_types())?;
        Ok(Extrinsics { bytes, infos })
    }
    // An empty set of extrinsics; used when a block body has none.
    pub(crate) fn empty() -> Self {
        Self {
            bytes: vec![],
            infos: vec![],
        }
    }
    /// How many extrinsics are in this block?
    pub fn len(&self) -> usize {
        self.bytes.len()
    }
    /// Are there any extrinsics in this block?
    pub fn is_empty(&self) -> bool {
        self.bytes.is_empty()
    }
    /// Iterate over the extrinsics.
    pub fn iter(&self) -> impl Iterator<Item = Extrinsic<'_, 'atblock>> {
        self.bytes
            .iter()
            .zip(self.infos.iter())
            .enumerate()
            .map(|(idx, (bytes, info))| Extrinsic { idx, bytes, info })
    }
}
/// This represents an extrinsic, and carries everything that we need to decode information out of it.
pub struct Extrinsic<'extrinsics, 'atblock> {
    // The position of this extrinsic within the block body.
    idx: usize,
    bytes: &'extrinsics [u8],
    info: &'extrinsics AnyExtrinsicInfo<'atblock>,
}
impl<'extrinsics, 'atblock> Extrinsic<'extrinsics, 'atblock> {
    /// Get the index of this extrinsic in the block.
    pub fn index(&self) -> usize {
        self.idx
    }
    /// Get the raw bytes of this extrinsic.
    pub fn bytes(&self) -> &'extrinsics [u8] {
        self.bytes
    }
    /// Is this extrinsic signed?
    pub fn is_signed(&self) -> bool {
        with_info!(&self.info => info.info.is_signed())
    }
    /// Return information about the call that this extrinsic is making.
    pub fn call(&self) -> ExtrinsicCall<'extrinsics, 'atblock> {
        ExtrinsicCall::new(self.bytes, self.info)
    }
    /// Return only the bytes of the address that signed this extrinsic.
    ///
    /// # Note
    ///
    /// Returns `None` if the extrinsic is not signed.
    pub fn address_bytes(&self) -> Option<&'extrinsics [u8]> {
        with_info!(&self.info => {
            info.info
                .signature_payload()
                .map(|s| &self.bytes[s.address_range()])
        })
    }
    /// Return only the signature bytes of this extrinsic.
    ///
    /// # Note
    ///
    /// Returns `None` if the extrinsic is not signed.
    pub fn signature_bytes(&self) -> Option<&'extrinsics [u8]> {
        with_info!(&self.info => {
            info.info
                .signature_payload()
                .map(|s| &self.bytes[s.signature_range()])
        })
    }
    /// Get information about the transaction extensions of this extrinsic.
    ///
    /// Returns `None` if there is no transaction extension payload.
    pub fn transaction_extensions(
        &self,
    ) -> Option<ExtrinsicTransactionExtensions<'extrinsics, 'atblock>> {
        ExtrinsicTransactionExtensions::new(self.bytes, self.info)
    }
}
// TODO add extrinsic.call() with .bytes, and .decode function to make it easy to decode call fields into Value or whatever.
// Then add this to the example. Make sure we can do everything that dot-block-decoder does easily.
+22
View File
@@ -0,0 +1,22 @@
//! `subxt-historic` is a library for working with non head-of-chain data on Substrate-based blockchains.
// TODO: Remove this when we're ready to release, and document everything!
#![allow(missing_docs)]
mod utils;
pub mod client;
pub mod config;
pub mod error;
pub mod extrinsics;
pub mod storage;
pub use client::{OfflineClient, OnlineClient};
pub use config::polkadot::PolkadotConfig;
pub use config::substrate::SubstrateConfig;
pub use error::Error;
/// External types and crates that may be useful.
pub mod ext {
pub use futures::stream::{Stream, StreamExt};
}
+411
View File
@@ -0,0 +1,411 @@
mod storage_entry;
mod storage_info;
mod storage_key;
mod storage_value;
use crate::client::{OfflineClientAtBlockT, OnlineClientAtBlockT};
use crate::config::Config;
use crate::error::{StorageEntryIsNotAMap, StorageEntryIsNotAPlainValue, StorageError};
use crate::storage::storage_info::with_info;
use storage_info::AnyStorageInfo;
pub use storage_entry::StorageEntry;
pub use storage_key::{StorageHasher, StorageKey, StorageKeyPart};
pub use storage_value::StorageValue;
// We take how storage keys can be passed in from `frame-decode`, so re-export here.
pub use frame_decode::storage::{IntoStorageKeys, StorageKeys};
/// Work with storage.
pub struct StorageClient<'atblock, Client, T> {
    // The client pinned at a specific block; gives access to metadata and
    // (for online clients) the RPC methods used to query storage.
    client: &'atblock Client,
    marker: std::marker::PhantomData<T>,
}
impl<'atblock, Client, T> StorageClient<'atblock, Client, T> {
    /// Construct a new [`StorageClient`] from a client at some block.
    pub(crate) fn new(client: &'atblock Client) -> Self {
        Self {
            client,
            marker: std::marker::PhantomData,
        }
    }
}
// Things that we can do offline with storage.
impl<'atblock, 'client: 'atblock, Client, T> StorageClient<'atblock, Client, T>
where
    T: Config + 'client,
    Client: OfflineClientAtBlockT<'client, T>,
{
    /// Select the storage entry you'd like to work with.
    ///
    /// Returns a map or plain client depending on the shape of the entry as
    /// described by the metadata; errors if the storage information cannot be
    /// extracted from the metadata.
    pub fn entry(
        &self,
        pallet_name: impl Into<String>,
        storage_name: impl Into<String>,
    ) -> Result<StorageEntryClient<'atblock, Client, T>, StorageError> {
        let pallet_name = pallet_name.into();
        let storage_name = storage_name.into();
        let storage_info = AnyStorageInfo::new(
            &pallet_name,
            &storage_name,
            self.client.metadata(),
            self.client.legacy_types(),
        )?;
        if storage_info.is_map() {
            Ok(StorageEntryClient::Map(StorageEntryMapClient {
                client: self.client,
                pallet_name,
                storage_name,
                info: storage_info,
                marker: std::marker::PhantomData,
            }))
        } else {
            Ok(StorageEntryClient::Plain(StorageEntryPlainClient {
                client: self.client,
                pallet_name,
                storage_name,
                info: storage_info,
                marker: std::marker::PhantomData,
            }))
        }
    }
    /// Iterate over all of the storage entries listed in the metadata for the current block. This does **not** include well known
    /// storage entries like `:code` which are not listed in the metadata.
    pub fn entries(&self) -> impl Iterator<Item = StorageEntriesItem<'atblock, Client, T>> {
        let client = self.client;
        let metadata = client.metadata();
        frame_decode::helpers::list_storage_entries_any(metadata).map(|entry| StorageEntriesItem {
            entry,
            client: self.client,
            marker: std::marker::PhantomData,
        })
    }
}
/// Working with a specific storage entry.
pub struct StorageEntriesItem<'atblock, Client, T> {
    // The pallet/entry names as listed in the metadata.
    entry: frame_decode::helpers::StorageEntry<'atblock>,
    client: &'atblock Client,
    marker: std::marker::PhantomData<T>,
}
impl<'atblock, 'client: 'atblock, Client, T> StorageEntriesItem<'atblock, Client, T>
where
    T: Config + 'client,
    Client: OfflineClientAtBlockT<'client, T>,
{
    /// The pallet name.
    pub fn pallet_name(&self) -> &str {
        self.entry.pallet()
    }
    /// The storage entry name.
    pub fn storage_name(&self) -> &str {
        self.entry.entry()
    }
    /// Extract the relevant storage information so that we can work with this entry.
    pub fn entry(&self) -> Result<StorageEntryClient<'atblock, Client, T>, StorageError> {
        // Delegate to StorageClient::entry, which does the metadata lookup.
        StorageClient {
            client: self.client,
            marker: std::marker::PhantomData,
        }
        .entry(
            self.entry.pallet().to_owned(),
            self.entry.entry().to_owned(),
        )
    }
}
/// A client for working with a specific storage entry. This is an enum because the storage entry
/// might be either a map or a plain value, and each has a different interface.
pub enum StorageEntryClient<'atblock, Client, T> {
    /// The entry is a single plain value.
    Plain(StorageEntryPlainClient<'atblock, Client, T>),
    /// The entry is a map from one or more keys to values.
    Map(StorageEntryMapClient<'atblock, Client, T>),
}
impl<'atblock, Client, T> StorageEntryClient<'atblock, Client, T>
where
    T: Config + 'atblock,
    Client: OfflineClientAtBlockT<'atblock, T>,
{
    /// Get the pallet name.
    pub fn pallet_name(&self) -> &str {
        match self {
            StorageEntryClient::Plain(client) => &client.pallet_name,
            StorageEntryClient::Map(client) => &client.pallet_name,
        }
    }
    /// Get the storage entry name.
    pub fn storage_name(&self) -> &str {
        match self {
            StorageEntryClient::Plain(client) => &client.storage_name,
            StorageEntryClient::Map(client) => &client.storage_name,
        }
    }
    /// Is the storage entry a plain value?
    pub fn is_plain(&self) -> bool {
        matches!(self, StorageEntryClient::Plain(_))
    }
    /// Is the storage entry a map?
    pub fn is_map(&self) -> bool {
        matches!(self, StorageEntryClient::Map(_))
    }
    /// If this storage entry is a plain value, return the client for working with it. Else return an error.
    pub fn into_plain(
        self,
    ) -> Result<StorageEntryPlainClient<'atblock, Client, T>, StorageEntryIsNotAPlainValue> {
        match self {
            StorageEntryClient::Plain(client) => Ok(client),
            // The `_` pattern binds nothing, so `self` is not moved in this arm
            // and we can still borrow it to build the error details.
            StorageEntryClient::Map(_) => Err(StorageEntryIsNotAPlainValue {
                pallet_name: self.pallet_name().into(),
                storage_name: self.storage_name().into(),
            }),
        }
    }
    /// If this storage entry is a map, return the client for working with it. Else return an error.
    pub fn into_map(
        self,
    ) -> Result<StorageEntryMapClient<'atblock, Client, T>, StorageEntryIsNotAMap> {
        match self {
            // As above: `_` binds nothing, so `self` remains borrowable here.
            StorageEntryClient::Plain(_) => Err(StorageEntryIsNotAMap {
                pallet_name: self.pallet_name().into(),
                storage_name: self.storage_name().into(),
            }),
            StorageEntryClient::Map(client) => Ok(client),
        }
    }
}
/// A client for working with a plain storage entry.
pub struct StorageEntryPlainClient<'atblock, Client, T> {
    client: &'atblock Client,
    pallet_name: String,
    storage_name: String,
    // Storage shape/type information extracted from the metadata.
    info: AnyStorageInfo<'atblock>,
    marker: std::marker::PhantomData<T>,
}
impl<'atblock, Client, T> StorageEntryPlainClient<'atblock, Client, T>
where
    T: Config + 'atblock,
    Client: OfflineClientAtBlockT<'atblock, T>,
{
    /// Get the pallet name.
    pub fn pallet_name(&self) -> &str {
        &self.pallet_name
    }
    /// Get the storage entry name.
    pub fn storage_name(&self) -> &str {
        &self.storage_name
    }
}
impl<'atblock, Client, T> StorageEntryPlainClient<'atblock, Client, T>
where
    T: Config + 'atblock,
    Client: OnlineClientAtBlockT<'atblock, T>,
{
    /// Fetch the value for this storage entry, or `None` if no value is stored.
    pub async fn fetch(&self) -> Result<Option<StorageValue<'_, 'atblock>>, StorageError> {
        let key_bytes = self.key();
        let maybe_bytes = fetch(self.client, &key_bytes).await?;
        Ok(maybe_bytes.map(|bytes| StorageValue::new(&self.info, bytes)))
    }
    /// The key for this storage entry.
    ///
    /// A plain entry's key is just the hashed pallet/entry prefix.
    pub fn key(&self) -> [u8; 32] {
        frame_decode::storage::encode_prefix(&self.pallet_name, &self.storage_name)
    }
}
/// A client for working with a storage entry that is a map.
pub struct StorageEntryMapClient<'atblock, Client, T> {
    client: &'atblock Client,
    pallet_name: String,
    storage_name: String,
    // Storage shape/type information extracted from the metadata.
    info: AnyStorageInfo<'atblock>,
    marker: std::marker::PhantomData<T>,
}
impl<'atblock, Client, T> StorageEntryMapClient<'atblock, Client, T>
where
    T: Config + 'atblock,
    Client: OfflineClientAtBlockT<'atblock, T>,
{
    /// Get the pallet name.
    pub fn pallet_name(&self) -> &str {
        &self.pallet_name
    }
    /// Get the storage entry name.
    pub fn storage_name(&self) -> &str {
        &self.storage_name
    }
}
impl<'atblock, Client, T> StorageEntryMapClient<'atblock, Client, T>
where
    T: Config + 'atblock,
    Client: OnlineClientAtBlockT<'atblock, T>,
{
    /// Fetch a specific key in this map. If the number of keys provided is not equal
    /// to the number of keys required to fetch a single value from the map, then an error
    /// will be emitted.
    pub async fn fetch<Keys: IntoStorageKeys>(
        &self,
        keys: Keys,
    ) -> Result<Option<StorageValue<'_, 'atblock>>, StorageError> {
        // A complete key must supply exactly as many parts as the metadata declares.
        let expected_num_keys = with_info!(info = &self.info => {
            info.info.keys.len()
        });
        if expected_num_keys != keys.num_keys() {
            return Err(StorageError::WrongNumberOfKeysProvided {
                num_keys_provided: keys.num_keys(),
                num_keys_expected: expected_num_keys,
            });
        }
        let key_bytes = self.key(keys)?;
        fetch(self.client, &key_bytes)
            .await
            .map(|v| v.map(|bytes| StorageValue::new(&self.info, bytes)))
    }
    /// Iterate over the values underneath the provided keys.
    ///
    /// The keys may be partial; all descendant values under the resulting
    /// key prefix are streamed back.
    pub async fn iter<Keys: IntoStorageKeys>(
        &self,
        keys: Keys,
    ) -> Result<
        impl futures::Stream<Item = Result<StorageEntry<'_, 'atblock>, StorageError>> + Unpin,
        StorageError,
    > {
        use futures::stream::StreamExt;
        use subxt_rpcs::methods::chain_head::{
            ArchiveStorageEvent, StorageQuery, StorageQueryType,
        };
        let block_hash = self.client.block_hash();
        let key_bytes = self.key(keys)?;
        // One query asking for all descendant values under the encoded key prefix.
        let items = std::iter::once(StorageQuery {
            key: &*key_bytes,
            query_type: StorageQueryType::DescendantsValues,
        });
        let sub = self
            .client
            .rpc_methods()
            .archive_v1_storage(block_hash.into(), items, None)
            .await
            .map_err(|e| StorageError::RpcError { reason: e })?;
        // Map raw storage events into decoded entries, ending the stream on `Done`.
        let sub = sub.filter_map(async |item| {
            let item = match item {
                Ok(ArchiveStorageEvent::Item(item)) => item,
                Ok(ArchiveStorageEvent::Error(err)) => {
                    return Some(Err(StorageError::StorageEventError { reason: err.error }));
                }
                Ok(ArchiveStorageEvent::Done) => return None,
                Err(e) => return Some(Err(StorageError::RpcError { reason: e })),
            };
            // Items without a value are skipped (filter_map drops the None).
            item.value
                .map(|value| Ok(StorageEntry::new(&self.info, item.key.0, value.0)))
        });
        Ok(Box::pin(sub))
    }
    // Encode a storage key for this storage entry to bytes. The key can be a partial key
    // (i.e there are still multiple values below it) or a complete key that points to a specific value.
    //
    // Dev note: We don't have any functions that can take an already-encoded key and fetch an entry from
    // it yet, so we don't expose this. If we did expose it, we might want to return some struct that wraps
    // the key bytes and some metadata about them. Or maybe just fetch_raw and iter_raw.
    fn key<Keys: IntoStorageKeys>(&self, keys: Keys) -> Result<Vec<u8>, StorageError> {
        with_info!(info = &self.info => {
            let mut key_bytes = Vec::new();
            frame_decode::storage::encode_storage_key_with_info_to(
                &self.pallet_name,
                &self.storage_name,
                keys,
                &info.info,
                info.resolver,
                &mut key_bytes,
            ).map_err(|e| StorageError::KeyEncodeError { reason: e })?;
            Ok(key_bytes)
        })
    }
}
// Fetch the raw bytes of a single storage value, given its complete key.
// Returns `Ok(None)` if no value exists at that key.
async fn fetch<'atblock, Client, T>(
    client: &Client,
    key_bytes: &[u8],
) -> Result<Option<Vec<u8>>, StorageError>
where
    T: Config + 'atblock,
    Client: OnlineClientAtBlockT<'atblock, T>,
{
    use subxt_rpcs::methods::chain_head::{ArchiveStorageEvent, StorageQuery, StorageQueryType};
    let queries = std::iter::once(StorageQuery {
        key: key_bytes,
        query_type: StorageQueryType::Value,
    });
    let mut events = client
        .rpc_methods()
        .archive_v1_storage(client.block_hash().into(), queries, None)
        .await
        .map_err(|e| StorageError::RpcError { reason: e })?;
    // Pull the first event off the stream, propagating RPC errors.
    let Some(event) = events
        .next()
        .await
        .transpose()
        .map_err(|e| StorageError::RpcError { reason: e })?
    else {
        // Stream ended without producing anything: no value found.
        return Ok(None);
    };
    let item = match event {
        ArchiveStorageEvent::Item(item) => item,
        // If it errors, return the error:
        ArchiveStorageEvent::Error(err) => {
            return Err(StorageError::StorageEventError { reason: err.error });
        }
        // If it's done, it means no value was returned:
        ArchiveStorageEvent::Done => return Ok(None),
    };
    // This shouldn't happen, but if it does, the value we wanted wasn't found.
    if item.key.0 != key_bytes {
        return Ok(None);
    }
    // If the item carries no value bytes, either the API is misbehaving
    // or no matching value was found.
    match item.value {
        Some(value_bytes) => Ok(Some(value_bytes.0)),
        None => Ok(None),
    }
}
+48
View File
@@ -0,0 +1,48 @@
use super::storage_info::AnyStorageInfo;
use super::storage_key::StorageKey;
use super::storage_value::StorageValue;
use crate::error::{StorageKeyError, StorageValueError};
use scale_decode::DecodeAsType;
/// This represents a storage entry, which is a key-value pair in the storage.
pub struct StorageEntry<'entry, 'atblock> {
    // The raw bytes of the storage key.
    key: Vec<u8>,
    // The value; this contains the storage information already, which is
    // also used to decode the key above.
    value: StorageValue<'entry, 'atblock>,
}
impl<'entry, 'atblock> StorageEntry<'entry, 'atblock> {
/// Create a new storage entry.
pub fn new(info: &'entry AnyStorageInfo<'atblock>, key: Vec<u8>, value: Vec<u8>) -> Self {
Self {
key,
value: StorageValue::new(info, value),
}
}
/// Get the raw bytes for this storage entry's key.
pub fn key_bytes(&self) -> &[u8] {
&self.key
}
/// Get the raw bytes for this storage entry's value.
pub fn value_bytes(&self) -> &[u8] {
self.value.bytes()
}
/// Consume this storage entry and return the raw bytes for the key and value.
pub fn into_key_and_value_bytes(self) -> (Vec<u8>, Vec<u8>) {
(self.key, self.value.into_bytes())
}
/// Decode the key for this storage entry. This gives back a type from which we can
/// decode specific parts of the key hash (where applicable).
pub fn decode_key(&'_ self) -> Result<StorageKey<'_, 'atblock>, StorageKeyError> {
StorageKey::new(self.value.info, &self.key)
}
/// Decode this storage value.
pub fn decode_value<T: DecodeAsType>(&self) -> Result<T, StorageValueError> {
self.value.decode::<T>()
}
}
+102
View File
@@ -0,0 +1,102 @@
use crate::error::StorageError;
use frame_decode::storage::StorageTypeInfo;
use frame_metadata::RuntimeMetadata;
use scale_info_legacy::{LookupName, TypeRegistrySet};
/// Storage information for a single entry, abstracting over how types are
/// resolved: `Legacy` resolves types by name from a legacy type registry
/// (pre-V14 metadata), while `Current` resolves them by `u32` id from the
/// metadata's own portable type registry (V14+).
pub enum AnyStorageInfo<'atblock> {
    Legacy(StorageInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>),
    Current(StorageInfo<'atblock, u32, scale_info::PortableRegistry>),
}
impl<'atblock> AnyStorageInfo<'atblock> {
    /// Look up the storage information for a single storage entry, identified by
    /// pallet and entry name, from the given metadata. For pre-V14 metadata
    /// (which has no embedded type registry), types are resolved via the
    /// provided legacy type registry; V14+ metadata resolves types from its own
    /// embedded portable registry.
    pub fn new(
        pallet_name: &str,
        entry_name: &str,
        metadata: &'atblock RuntimeMetadata,
        legacy_types: &'atblock TypeRegistrySet<'atblock>,
    ) -> Result<Self, StorageError> {
        let info = match metadata {
            RuntimeMetadata::V8(m) => storage_info_inner(pallet_name, entry_name, m, legacy_types),
            RuntimeMetadata::V9(m) => storage_info_inner(pallet_name, entry_name, m, legacy_types),
            RuntimeMetadata::V10(m) => storage_info_inner(pallet_name, entry_name, m, legacy_types),
            RuntimeMetadata::V11(m) => storage_info_inner(pallet_name, entry_name, m, legacy_types),
            RuntimeMetadata::V12(m) => storage_info_inner(pallet_name, entry_name, m, legacy_types),
            RuntimeMetadata::V13(m) => storage_info_inner(pallet_name, entry_name, m, legacy_types),
            RuntimeMetadata::V14(m) => storage_info_inner(pallet_name, entry_name, m, &m.types),
            RuntimeMetadata::V15(m) => storage_info_inner(pallet_name, entry_name, m, &m.types),
            RuntimeMetadata::V16(m) => storage_info_inner(pallet_name, entry_name, m, &m.types),
            unknown => {
                return Err(StorageError::UnsupportedMetadataVersion {
                    version: unknown.version(),
                });
            }
        }?;
        // Helper which extracts the storage info from any supported metadata
        // version, pairing it with the resolver needed to look up the types
        // that it references.
        fn storage_info_inner<'atblock, Info, Resolver>(
            pallet_name: &str,
            entry_name: &str,
            m: &'atblock Info,
            type_resolver: &'atblock Resolver,
        ) -> Result<AnyStorageInfo<'atblock>, StorageError>
        where
            Info: StorageTypeInfo,
            Resolver: scale_type_resolver::TypeResolver<TypeId = Info::TypeId>,
            AnyStorageInfo<'atblock>: From<StorageInfo<'atblock, Info::TypeId, Resolver>>,
        {
            m.get_storage_info(pallet_name, entry_name)
                .map(|frame_storage_info| {
                    let info = StorageInfo {
                        info: frame_storage_info,
                        resolver: type_resolver,
                    };
                    AnyStorageInfo::from(info)
                })
                .map_err(|e| StorageError::ExtractStorageInfoError {
                    reason: e.into_owned(),
                })
        }
        Ok(info)
    }
    /// Is the storage entry a map (ie something we'd provide extra keys to to access a value, or otherwise iterate over)?
    pub fn is_map(&self) -> bool {
        match self {
            AnyStorageInfo::Legacy(info) => !info.info.keys.is_empty(),
            AnyStorageInfo::Current(info) => !info.info.keys.is_empty(),
        }
    }
}
impl<'atblock> From<StorageInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>>
for AnyStorageInfo<'atblock>
{
fn from(info: StorageInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>) -> Self {
AnyStorageInfo::Legacy(info)
}
}
impl<'atblock> From<StorageInfo<'atblock, u32, scale_info::PortableRegistry>>
    for AnyStorageInfo<'atblock>
{
    /// Wrap current (portable-registry) storage info into the type-erased enum.
    fn from(value: StorageInfo<'atblock, u32, scale_info::PortableRegistry>) -> Self {
        Self::Current(value)
    }
}
/// Storage information for a single entry, generic over the type id and type
/// resolver in use.
pub struct StorageInfo<'atblock, TypeId, Resolver> {
    /// Information about the storage entry's keys and value type.
    pub info: frame_decode::storage::StorageInfo<TypeId>,
    /// The resolver used to look up the types that `info` references.
    pub resolver: &'atblock Resolver,
}
// Dispatch over both `AnyStorageInfo` variants, binding the inner
// `StorageInfo` to `$info` so that the same expression `$fn` can be written
// once and used for either the legacy or current (V14+) shape.
macro_rules! with_info {
    ($info:ident = $original_info:expr => $fn:expr) => {{
        #[allow(clippy::clone_on_copy)]
        let info = match $original_info {
            AnyStorageInfo::Legacy($info) => $fn,
            AnyStorageInfo::Current($info) => $fn,
        };
        info
    }};
}
pub(crate) use with_info;
+159
View File
@@ -0,0 +1,159 @@
use super::AnyStorageInfo;
use crate::{error::StorageKeyError, storage::storage_info::with_info};
use scale_info_legacy::{LookupName, TypeRegistrySet};
// This is part of our public interface.
pub use frame_decode::storage::StorageHasher;
// Decoded storage key information, abstracting over how types are resolved:
// `Legacy` resolves types by name from a legacy type registry, while
// `Current` resolves them by `u32` id from the metadata's portable registry.
enum AnyStorageKeyInfo<'atblock> {
    Legacy(StorageKeyInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>),
    Current(StorageKeyInfo<'atblock, u32, scale_info::PortableRegistry>),
}
impl<'atblock> From<StorageKeyInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>>
for AnyStorageKeyInfo<'atblock>
{
fn from(info: StorageKeyInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>) -> Self {
AnyStorageKeyInfo::Legacy(info)
}
}
impl<'atblock> From<StorageKeyInfo<'atblock, u32, scale_info::PortableRegistry>>
    for AnyStorageKeyInfo<'atblock>
{
    /// Wrap current (portable-registry) storage key info into the type-erased enum.
    fn from(value: StorageKeyInfo<'atblock, u32, scale_info::PortableRegistry>) -> Self {
        Self::Current(value)
    }
}
// Decoded information about a storage key, paired with the resolver needed
// to look up the types that it references.
struct StorageKeyInfo<'atblock, TypeId, Resolver> {
    info: frame_decode::storage::StorageKey<TypeId>,
    resolver: &'atblock Resolver,
}
// Dispatch over both `AnyStorageKeyInfo` variants, binding the inner
// `StorageKeyInfo` to `$info` so that the same expression `$fn` can be
// written once for either the legacy or current (V14+) shape.
macro_rules! with_key_info {
    ($info:ident = $original_info:expr => $fn:expr) => {{
        #[allow(clippy::clone_on_copy)]
        let info = match $original_info {
            AnyStorageKeyInfo::Legacy($info) => $fn,
            AnyStorageKeyInfo::Current($info) => $fn,
        };
        info
    }};
}
/// This represents the different parts of a storage key.
pub struct StorageKey<'entry, 'atblock> {
    // The decoded layout of the key: one part per hashed value, plus the
    // resolver needed to decode any embedded values.
    info: AnyStorageKeyInfo<'atblock>,
    // The raw key bytes that the ranges in `info` point into.
    bytes: &'entry [u8],
}
impl<'entry, 'atblock> StorageKey<'entry, 'atblock> {
    // Decode the given key bytes using the provided storage entry information.
    // Fails if the bytes cannot be decoded, or if any bytes are left over after
    // decoding (which means they don't match this storage entry).
    pub(crate) fn new(
        info: &AnyStorageInfo<'atblock>,
        bytes: &'entry [u8],
    ) -> Result<Self, StorageKeyError> {
        with_info!(info = info => {
            let cursor = &mut &*bytes;
            let storage_key_info = frame_decode::storage::decode_storage_key_with_info(
                cursor,
                &info.info,
                info.resolver,
            ).map_err(|e| {
                StorageKeyError::DecodeError { reason: e.map_type_id(|id| id.to_string()) }
            })?;
            // Decoding must consume all of the key bytes; leftovers are an error.
            if !cursor.is_empty() {
                return Err(StorageKeyError::LeftoverBytes {
                    leftover_bytes: cursor.to_vec(),
                });
            }
            Ok(StorageKey {
                info: StorageKeyInfo {
                    info: storage_key_info,
                    resolver: info.resolver,
                }.into(),
                bytes,
            })
        })
    }
    /// Iterate over the parts of this storage key. Each part of a storage key corresponds to a
    /// single value that has been hashed.
    pub fn parts(&'_ self) -> impl ExactSizeIterator<Item = StorageKeyPart<'_, 'entry, 'atblock>> {
        let parts_len = with_key_info!(info = &self.info => info.info.parts().len());
        // Parts are built lazily; each one just records its index into the
        // decoded key info, alongside a handle to the info and raw key bytes.
        (0..parts_len).map(move |index| StorageKeyPart {
            index,
            info: &self.info,
            bytes: self.bytes,
        })
    }
    /// Return the part of the storage key at the provided index, or `None` if the index is out of bounds.
    pub fn part(&self, index: usize) -> Option<StorageKeyPart<'_, 'entry, 'atblock>> {
        if index < self.parts().len() {
            Some(StorageKeyPart {
                index,
                info: &self.info,
                bytes: self.bytes,
            })
        } else {
            None
        }
    }
}
/// This represents a part of a storage key.
pub struct StorageKeyPart<'key, 'entry, 'atblock> {
    // Which part of the key this is (an index into the decoded key parts).
    index: usize,
    // The decoded layout information for the whole key.
    info: &'key AnyStorageKeyInfo<'atblock>,
    // The raw bytes of the whole key; ranges from `info` select this part.
    bytes: &'entry [u8],
}
impl<'key, 'entry, 'atblock> StorageKeyPart<'key, 'entry, 'atblock> {
    /// Get the raw bytes for this part of the storage key: the hash bytes,
    /// plus the trailing encoded value where the hasher preserves one.
    pub fn bytes(&self) -> &'entry [u8] {
        with_key_info!(info = &self.info => {
            let part = &info.info[self.index];
            let hash_range = part.hash_range();
            // If this part has no decodable value, the value portion is empty
            // and the part ends where the hash ends.
            let end = part
                .value()
                .map(|v| v.range().end)
                .unwrap_or(hash_range.end);
            &self.bytes[hash_range.start..end]
        })
    }
    /// Get the hasher that was used to construct this part of the storage key.
    pub fn hasher(&self) -> StorageHasher {
        with_key_info!(info = &self.info => info.info[self.index].hasher())
    }
    /// For keys that were produced using "concat" or "identity" hashers, the value
    /// is available as a part of the key hash, allowing us to decode it into anything
    /// implementing [`scale_decode::DecodeAsType`]. If the key was produced using a
    /// different hasher, this will return `None`.
    pub fn decode<T: scale_decode::DecodeAsType>(&self) -> Result<Option<T>, StorageKeyError> {
        with_key_info!(info = &self.info => {
            let part_info = &info.info[self.index];
            // Hashers which don't embed the original value leave nothing to decode.
            let Some(value_info) = part_info.value() else {
                return Ok(None);
            };
            let value_bytes = &self.bytes[value_info.range()];
            let value_ty = value_info.ty().clone();
            let decoded_key_part = T::decode_as_type(
                &mut &*value_bytes,
                value_ty,
                info.resolver,
            ).map_err(|e| StorageKeyError::DecodePartError { index: self.index, reason: e })?;
            Ok(Some(decoded_key_part))
        })
    }
}
+48
View File
@@ -0,0 +1,48 @@
use super::storage_info::AnyStorageInfo;
use super::storage_info::with_info;
use crate::error::StorageValueError;
use scale_decode::DecodeAsType;
/// This represents a storage value.
pub struct StorageValue<'entry, 'atblock> {
    // Information about the storage entry this value belongs to; used to
    // decode the value bytes (and, via `StorageEntry`, the key).
    pub(crate) info: &'entry AnyStorageInfo<'atblock>,
    // The raw, still-encoded value bytes.
    bytes: Vec<u8>,
}
impl<'entry, 'atblock> StorageValue<'entry, 'atblock> {
    /// Create a new storage value from raw bytes and the storage information
    /// needed to decode them.
    pub fn new(info: &'entry AnyStorageInfo<'atblock>, bytes: Vec<u8>) -> Self {
        Self { info, bytes }
    }
    /// Get the raw bytes for this storage value.
    pub fn bytes(&self) -> &[u8] {
        &self.bytes
    }
    /// Consume this storage value and return the raw bytes.
    pub fn into_bytes(self) -> Vec<u8> {
        self.bytes
    }
    /// Decode this storage value into anything implementing
    /// [`scale_decode::DecodeAsType`]. Fails if the bytes cannot be decoded
    /// into the target type, or if any bytes are left over after decoding.
    pub fn decode<T: DecodeAsType>(&self) -> Result<T, StorageValueError> {
        with_info!(info = &self.info => {
            let cursor = &mut &*self.bytes;
            let value = T::decode_as_type(
                cursor,
                info.info.value_id.clone(),
                info.resolver,
            ).map_err(|e| StorageValueError::DecodeError { reason: e })?;
            // Decoding must consume every byte; leftovers suggest the wrong
            // target type was used.
            if !cursor.is_empty() {
                return Err(StorageValueError::LeftoverBytes {
                    leftover_bytes: cursor.to_vec(),
                });
            }
            Ok(value)
        })
    }
}
+5
View File
@@ -0,0 +1,5 @@
mod either;
mod range_map;
pub use either::Either;
pub use range_map::RangeMap;
+48
View File
@@ -0,0 +1,48 @@
/// Generate an enum whose variants each wrap a different concrete type, and
/// which implements `Iterator` and `futures::stream::Stream` by delegating to
/// whichever variant is active. Every variant must yield the same `Item` type
/// as the first variant.
macro_rules! either {
    ($name:ident( $fst:ident, $($variant:ident),* )) => {
        pub enum $name<$fst, $($variant),*> {
            $fst($fst),
            $($variant($variant),)*
        }
        impl<$fst, $($variant),*> Iterator for $name<$fst, $($variant),*>
        where
            $fst: Iterator,
            $($variant: Iterator<Item = $fst::Item>,)*
        {
            type Item = $fst::Item;
            fn next(&mut self) -> Option<Self::Item> {
                match self {
                    $name::$fst(inner) => inner.next(),
                    $( $name::$variant(inner) => inner.next(), )*
                }
            }
        }
        impl <$fst, $($variant),*> futures::stream::Stream for $name<$fst, $($variant),*>
        where
            $fst: futures::stream::Stream,
            $($variant: futures::stream::Stream<Item = $fst::Item>,)*
        {
            type Item = $fst::Item;
            fn poll_next(
                self: std::pin::Pin<&mut Self>,
                cx: &mut std::task::Context<'_>,
            ) -> std::task::Poll<Option<Self::Item>> {
                use std::pin::Pin;
                // SAFETY: the inner value is treated as structurally pinned:
                // we only use `get_unchecked_mut` to project the pinned enum
                // to its active variant and immediately re-pin that field,
                // never moving the inner value out of the Pin.
                unsafe {
                    match self.get_unchecked_mut() {
                        $name::$fst(inner) => Pin::new_unchecked(inner).poll_next(cx),
                        $( $name::$variant(inner) => Pin::new_unchecked(inner).poll_next(cx), )*
                    }
                }
            }
        }
    }
}
either!(Either(A, B));
+158
View File
@@ -0,0 +1,158 @@
use std::fmt::Display;
/// A map that associates ranges of keys with values.
#[derive(Debug, Clone)]
pub struct RangeMap<K, V> {
    // (range_start, range_end, value) triples, where each range is half-open
    // (`start..end`). This is guaranteed to be sorted by range start and to
    // have non-overlapping ranges.
    mapping: Vec<(K, K, V)>,
}
impl<K: Clone + Copy + Display + PartialOrd + Ord, V> RangeMap<K, V> {
/// Build an empty [`RangeMap`] as a placeholder.
pub fn empty() -> Self {
RangeMap {
mapping: Vec::new(),
}
}
/// Build a [`RangeMap`].
pub fn builder() -> RangeMapBuilder<K, V> {
RangeMapBuilder {
mapping: Vec::new(),
}
}
/// Return the value whose key is within the range, or None if not found.
pub fn get(&self, key: K) -> Option<&V> {
let idx = self
.mapping
.binary_search_by_key(&key, |&(start, end, _)| {
if key >= start && key < end {
key
} else {
start
}
})
.ok()?;
self.mapping.get(idx).map(|(_, _, val)| val)
}
}
/// A builder for constructing a [`RangeMap`]. Use [`RangeMap::builder()`] to create one.
#[derive(Debug, Clone)]
pub struct RangeMapBuilder<K, V> {
    // Ranges in insertion order; sorted by range start when `build()` is called.
    mapping: Vec<(K, K, V)>,
}
impl<K: Clone + Copy + Display + PartialOrd + Ord, V> RangeMapBuilder<K, V> {
    /// Try to add a half-open key range `start..end` mapping to the given value.
    /// If `start` is greater than `end`, the bounds are swapped first.
    ///
    /// Returns an error if the range is empty or overlaps with an existing range.
    pub fn try_add_range(
        &mut self,
        start: K,
        end: K,
        val: V,
    ) -> Result<&mut Self, RangeMapError<K>> {
        // Normalize reversed bounds so that `start <= end` holds below.
        let (start, end) = if start < end {
            (start, end)
        } else {
            (end, start)
        };
        // Ranges are half-open, so equal bounds denote an empty range.
        if start == end {
            return Err(RangeMapError::EmptyRange(start));
        }
        // Two half-open ranges overlap iff each one starts before the other ends.
        if let Some(&(s, e, _)) = self
            .mapping
            .iter()
            .find(|&&(s, e, _)| (start < e && end > s))
        {
            return Err(RangeMapError::OverlappingRanges {
                proposed: (start, end),
                existing: (s, e),
            });
        }
        self.mapping.push((start, end, val));
        Ok(self)
    }
    /// Add a half-open key range `start..end` mapping to the given value.
    ///
    /// # Panics
    ///
    /// This method will panic if the range is empty or overlaps with an existing range.
    pub fn add_range(mut self, start: K, end: K, val: V) -> Self {
        if let Err(e) = self.try_add_range(start, end, val) {
            panic!("{e}")
        }
        self
    }
    /// Finish adding ranges and build the [`RangeMap`].
    pub fn build(mut self) -> RangeMap<K, V> {
        // Sort by range start to establish the ordering invariant that
        // `RangeMap::get` relies on for its binary search.
        self.mapping.sort_by_key(|&(start, _, _)| start);
        RangeMap {
            mapping: self.mapping,
        }
    }
}
/// An error that can occur when calling [`RangeMapBuilder::try_add_range()`].
#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)]
pub enum RangeMapError<K: Display> {
    /// An error indicating that the proposed range is empty, i.e. `start == end`
    /// (ranges are half-open, so such a range would contain no keys).
    #[error("Block range cannot be empty: start and end values must be different, but got {} for both", .0)]
    EmptyRange(K),
    /// An error indicating that the proposed range overlaps with an existing one.
    #[error("Overlapping block ranges are not allowed: proposed range is {}..{}, but we already have {}..{}", proposed.0, proposed.1, existing.0, existing.1)]
    OverlappingRanges { proposed: (K, K), existing: (K, K) },
}
#[cfg(test)]
mod test {
    use super::*;

    // Lookups across contiguous ranges: boundaries belong to the range they start.
    #[test]
    fn test_rangemap_get() {
        let spec_version = RangeMap::builder()
            .add_range(0, 100, 1)
            .add_range(100, 200, 2)
            .add_range(200, 300, 3)
            .build();
        assert_eq!(spec_version.get(0), Some(&1));
        assert_eq!(spec_version.get(50), Some(&1));
        assert_eq!(spec_version.get(100), Some(&2));
        assert_eq!(spec_version.get(150), Some(&2));
        assert_eq!(spec_version.get(200), Some(&3));
        assert_eq!(spec_version.get(250), Some(&3));
        assert_eq!(spec_version.get(300), None);
    }

    // Keys which fall in a gap between non-adjacent ranges resolve to nothing.
    #[test]
    fn test_rangemap_get_with_gaps() {
        let map = RangeMap::builder()
            .add_range(0, 100, 1)
            .add_range(200, 300, 3)
            .build();
        assert_eq!(map.get(50), Some(&1));
        assert_eq!(map.get(150), None);
        assert_eq!(map.get(250), Some(&3));
    }

    // Overlapping ranges are rejected with details of the clash.
    #[test]
    fn test_rangemap_set() {
        let mut spec_version = RangeMap::builder()
            .add_range(0, 100, 1)
            .add_range(200, 300, 3);
        assert_eq!(
            spec_version.try_add_range(99, 130, 2).unwrap_err(),
            RangeMapError::OverlappingRanges {
                proposed: (99, 130),
                existing: (0, 100),
            }
        );
        assert_eq!(
            spec_version.try_add_range(170, 201, 2).unwrap_err(),
            RangeMapError::OverlappingRanges {
                proposed: (170, 201),
                existing: (200, 300),
            }
        );
    }

    // Ranges are half-open, so start == end denotes an empty (rejected) range.
    #[test]
    fn test_rangemap_empty_range_rejected() {
        let mut builder = RangeMap::<u32, u32>::builder();
        assert_eq!(
            builder.try_add_range(50, 50, 9).unwrap_err(),
            RangeMapError::EmptyRange(50)
        );
    }

    // Reversed bounds are swapped before insertion.
    #[test]
    fn test_rangemap_reversed_range_normalized() {
        let map = RangeMap::builder().add_range(300, 200, 3).build();
        assert_eq!(map.get(250), Some(&3));
        assert_eq!(map.get(199), None);
        assert_eq!(map.get(300), None);
    }
}