Metadata V16: Be more dynamic over which hasher is used. (#1974)

* Use DynamicHasher256 to support Blake2 or Keccak depending on chain

* remove Config::Hash associated type, replace with HashFor<Config> alias

* Fix doc links

* fix wasm tests

* Don't strip system pallet associated types. check System.Hashing, not Hash. Rename BlockHash trait to Hash

* Tweak comment

* fmt

* fix merge

* Fix typo
This commit is contained in:
James Wilson
2025-04-23 10:12:48 +01:00
committed by GitHub
parent a8ae55a61b
commit 21b3f52191
43 changed files with 573 additions and 371 deletions
+23 -5
View File
@@ -5,7 +5,7 @@
use super::BlockError;
use crate::blocks::extrinsic_transaction_extensions::ExtrinsicTransactionExtensions;
use crate::{
config::{Config, Hasher},
config::{Config, HashFor, Hasher},
error::{Error, MetadataError},
Metadata,
};
@@ -22,6 +22,7 @@ pub use crate::blocks::StaticExtrinsic;
pub struct Extrinsics<T: Config> {
extrinsics: Vec<Arc<(Extrinsic<'static, u32>, Vec<u8>)>>,
metadata: Metadata,
hasher: T::Hasher,
_marker: core::marker::PhantomData<T>,
}
@@ -30,6 +31,7 @@ impl<T: Config> Extrinsics<T> {
/// each extrinsic hash (in the form of bytes) and some metadata that
/// we'll use to decode them.
pub fn decode_from(extrinsics: Vec<Vec<u8>>, metadata: Metadata) -> Result<Self, Error> {
let hasher = T::Hasher::new(&metadata);
let extrinsics = extrinsics
.into_iter()
.enumerate()
@@ -63,6 +65,7 @@ impl<T: Config> Extrinsics<T> {
Ok(Self {
extrinsics,
hasher,
metadata,
_marker: core::marker::PhantomData,
})
@@ -85,10 +88,16 @@ impl<T: Config> Extrinsics<T> {
pub fn iter(&self) -> impl Iterator<Item = ExtrinsicDetails<T>> + Send + Sync + 'static {
let extrinsics = self.extrinsics.clone();
let num_extrinsics = self.extrinsics.len();
let hasher = self.hasher;
let metadata = self.metadata.clone();
(0..num_extrinsics).map(move |index| {
ExtrinsicDetails::new(index as u32, extrinsics[index].clone(), metadata.clone())
ExtrinsicDetails::new(
index as u32,
extrinsics[index].clone(),
hasher,
metadata.clone(),
)
})
}
@@ -133,6 +142,8 @@ pub struct ExtrinsicDetails<T: Config> {
index: u32,
/// Extrinsic bytes and decode info.
ext: Arc<(Extrinsic<'static, u32>, Vec<u8>)>,
/// Hash the extrinsic if we want.
hasher: T::Hasher,
/// Subxt metadata to fetch the extrinsic metadata.
metadata: Metadata,
_marker: core::marker::PhantomData<T>,
@@ -147,20 +158,22 @@ where
pub fn new(
index: u32,
ext: Arc<(Extrinsic<'static, u32>, Vec<u8>)>,
hasher: T::Hasher,
metadata: Metadata,
) -> ExtrinsicDetails<T> {
ExtrinsicDetails {
index,
ext,
hasher,
metadata,
_marker: core::marker::PhantomData,
}
}
/// Calculate and return the hash of the extrinsic, based on the configured hasher.
pub fn hash(&self) -> T::Hash {
pub fn hash(&self) -> HashFor<T> {
// Use hash(), not hash_of(), because we don't want to double encode the bytes.
T::Hasher::hash(self.bytes())
self.hasher.hash(self.bytes())
}
/// Is the extrinsic signed?
@@ -532,6 +545,7 @@ mod tests {
#[test]
fn tx_hashes_line_up() {
let metadata = metadata();
let hasher = <SubstrateConfig as Config>::Hasher::new(&metadata);
let tx = crate::dynamic::tx(
"Test",
@@ -559,7 +573,11 @@ mod tests {
// Both of these types should produce the same bytes.
assert_eq!(tx_encoded.encoded(), extrinsic.bytes(), "bytes should eq");
// Both of these types should produce the same hash.
assert_eq!(tx_encoded.hash(), extrinsic.hash(), "hashes should eq");
assert_eq!(
tx_encoded.hash_with(hasher),
extrinsic.hash(),
"hashes should eq"
);
}
#[test]
+5 -2
View File
@@ -4,7 +4,10 @@
//! A couple of client types that we use elsewhere.
use crate::{config::Config, metadata::Metadata};
use crate::{
config::{Config, HashFor},
metadata::Metadata,
};
use derive_where::derive_where;
/// This provides access to some relevant client state in transaction extensions,
@@ -12,7 +15,7 @@ use derive_where::derive_where;
#[derive_where(Clone, Debug)]
pub struct ClientState<C: Config> {
/// Genesis hash.
pub genesis_hash: C::Hash,
pub genesis_hash: HashFor<C>,
/// Runtime version.
pub runtime_version: RuntimeVersion,
/// Metadata.
+8 -3
View File
@@ -7,7 +7,11 @@
//! [`crate::config::DefaultExtrinsicParams`] provides a general-purpose
//! implementation of this that will work in many cases.
use crate::{client::ClientState, error::ExtrinsicParamsError, Config};
use crate::{
client::ClientState,
config::{Config, HashFor},
error::ExtrinsicParamsError,
};
use alloc::vec::Vec;
use core::any::Any;
@@ -74,7 +78,7 @@ pub trait Params<T: Config> {
/// Set the account nonce.
fn inject_account_nonce(&mut self, _nonce: u64) {}
/// Set the current block.
fn inject_block(&mut self, _number: u64, _hash: T::Hash) {}
fn inject_block(&mut self, _number: u64, _hash: HashFor<T>) {}
}
impl<T: Config> Params<T> for () {}
@@ -85,7 +89,8 @@ macro_rules! impl_tuples {
fn inject_account_nonce(&mut self, nonce: u64) {
$(self.$index.inject_account_nonce(nonce);)+
}
fn inject_block(&mut self, number: u64, hash: Conf::Hash) {
fn inject_block(&mut self, number: u64, hash: HashFor<Conf>) {
$(self.$index.inject_block(number, hash);)+
}
}
+16 -12
View File
@@ -20,6 +20,7 @@ use core::fmt::Debug;
use scale_decode::DecodeAsType;
use scale_encode::EncodeAsType;
use serde::{de::DeserializeOwned, Serialize};
use subxt_metadata::Metadata;
pub use default_extrinsic_params::{DefaultExtrinsicParams, DefaultExtrinsicParamsBuilder};
pub use extrinsic_params::{ExtrinsicParams, ExtrinsicParamsEncoder};
@@ -33,9 +34,6 @@ pub use transaction_extensions::TransactionExtension;
// And we want the compiler to infer `Send` and `Sync` OK for things which have `T: Config`
// rather than having to `unsafe impl` them ourselves.
pub trait Config: Sized + Send + Sync + 'static {
/// The output of the `Hasher` function.
type Hash: BlockHash;
/// The account ID type.
type AccountId: Debug + Clone + Encode + Decode + Serialize + Send;
@@ -46,7 +44,7 @@ pub trait Config: Sized + Send + Sync + 'static {
type Signature: Debug + Clone + Encode + Decode + Send;
/// The hashing system (algorithm) being used in the runtime (e.g. Blake2).
type Hasher: Debug + Hasher<Output = Self::Hash>;
type Hasher: Debug + Clone + Copy + Hasher + Send + Sync;
/// The block header.
type Header: Debug + Header<Hasher = Self::Hasher> + Sync + Send + DeserializeOwned;
@@ -58,11 +56,14 @@ pub trait Config: Sized + Send + Sync + 'static {
type AssetId: Debug + Clone + Encode + DecodeAsType + EncodeAsType + Send;
}
/// Given some [`Config`], this returns the type of hash used.
pub type HashFor<T> = <<T as Config>::Hasher as Hasher>::Output;
/// Given some [`Config`], this returns the other params needed for its `ExtrinsicParams`.
pub type ParamsFor<T> = <<T as Config>::ExtrinsicParams as ExtrinsicParams<T>>::Params;
/// Block hashes must conform to a bunch of things to be used in Subxt.
pub trait BlockHash:
pub trait Hash:
Debug
+ Copy
+ Send
@@ -77,7 +78,7 @@ pub trait BlockHash:
+ core::hash::Hash
{
}
impl<T> BlockHash for T where
impl<T> Hash for T where
T: Debug
+ Copy
+ Send
@@ -97,15 +98,18 @@ impl<T> BlockHash for T where
/// and extrinsics.
pub trait Hasher {
/// The type given back from the hash operation
type Output;
type Output: Hash;
/// Construct a new hasher.
fn new(metadata: &Metadata) -> Self;
/// Hash some bytes to the given output type.
fn hash(s: &[u8]) -> Self::Output;
fn hash(&self, s: &[u8]) -> Self::Output;
/// Hash some SCALE encodable type to the given output type.
fn hash_of<S: Encode>(s: &S) -> Self::Output {
fn hash_of<S: Encode>(&self, s: &S) -> Self::Output {
let out = s.encode();
Self::hash(&out)
self.hash(&out)
}
}
@@ -120,7 +124,7 @@ pub trait Header: Sized + Encode + Decode {
fn number(&self) -> Self::Number;
/// Hash this header.
fn hash(&self) -> <Self::Hasher as Hasher>::Output {
Self::Hasher::hash_of(self)
fn hash_with(&self, hasher: Self::Hasher) -> <Self::Hasher as Hasher>::Output {
hasher.hash_of(self)
}
}
-1
View File
@@ -17,7 +17,6 @@ pub use primitive_types::{H256, U256};
pub enum PolkadotConfig {}
impl Config for PolkadotConfig {
type Hash = <SubstrateConfig as Config>::Hash;
type AccountId = <SubstrateConfig as Config>::AccountId;
type Signature = <SubstrateConfig as Config>::Signature;
type Hasher = <SubstrateConfig as Config>::Hasher;
+64 -7
View File
@@ -11,6 +11,7 @@ use alloc::vec::Vec;
use codec::{Decode, Encode};
pub use primitive_types::{H256, U256};
use serde::{Deserialize, Serialize};
use subxt_metadata::Metadata;
/// Default set of commonly used types by Substrate runtimes.
// Note: We only use this at the type level, so it should be impossible to
@@ -21,12 +22,11 @@ use serde::{Deserialize, Serialize};
pub enum SubstrateConfig {}
impl Config for SubstrateConfig {
type Hash = H256;
type AccountId = AccountId32;
type Address = MultiAddress<Self::AccountId, u32>;
type Signature = MultiSignature;
type Hasher = BlakeTwo256;
type Header = SubstrateHeader<u32, BlakeTwo256>;
type Hasher = DynamicHasher256;
type Header = SubstrateHeader<u32, DynamicHasher256>;
type ExtrinsicParams = SubstrateExtrinsicParams<Self>;
type AssetId = u32;
}
@@ -39,17 +39,73 @@ pub type SubstrateExtrinsicParams<T> = DefaultExtrinsicParams<T>;
/// This is what you provide to methods like `sign_and_submit()`.
pub type SubstrateExtrinsicParamsBuilder<T> = DefaultExtrinsicParamsBuilder<T>;
/// A type that can hash values using the blake2_256 algorithm.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Encode)]
/// A hasher (ie implements [`Hasher`]) which hashes values using the blake2_256 algorithm.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct BlakeTwo256;
impl Hasher for BlakeTwo256 {
type Output = H256;
// Stateless hasher: there is nothing to learn from the metadata, so
// construction ignores it and hashing is always blake2_256.
fn new(_metadata: &Metadata) -> Self {
Self
}
fn hash(&self, s: &[u8]) -> Self::Output {
sp_crypto_hashing::blake2_256(s).into()
}
}
/// A hasher (ie implements [`Hasher`]) which inspects the runtime metadata to decide how to
/// hash types, falling back to blake2_256 if the hasher information is not available.
///
/// Currently this hasher supports only `BlakeTwo256` and `Keccak256` hashing methods.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct DynamicHasher256(HashType);
// Which concrete algorithm [`DynamicHasher256`] will apply. This is decided once,
// from metadata, in `Hasher::new`, and then copied around freely (the type is `Copy`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum HashType {
// Most chains use this:
BlakeTwo256,
// Chains like Hyperbridge use this (tends to be eth compatible chains)
Keccak256,
// If we don't have V16 metadata, we'll emit this and default to BlakeTwo256.
Unknown,
}
impl Hasher for DynamicHasher256 {
    type Output = H256;

    /// Build a hasher by inspecting the chain's metadata: look up the `System`
    /// pallet's `Hashing` associated type (available from V16 metadata) and pick
    /// the matching algorithm. Anything we cannot identify resolves to
    /// [`HashType::Unknown`], which hashes with blake2_256 (the common default).
    fn new(metadata: &Metadata) -> Self {
        // Determine the Hash associated type used for the current chain, if possible.
        let Some(system_pallet) = metadata.pallet_by_name("System") else {
            return Self(HashType::Unknown);
        };
        let Some(hash_ty_id) = system_pallet.associated_type_id("Hashing") else {
            return Self(HashType::Unknown);
        };
        // Well-formed metadata always contains type info for an associated type id
        // it handed us, but rather than panicking on malformed metadata, degrade to
        // the default hasher, consistent with the other fallback cases above.
        let Some(ty) = metadata.types().resolve(hash_ty_id) else {
            return Self(HashType::Unknown);
        };
        let hash_type = match ty.path.ident().as_deref().unwrap_or("") {
            "BlakeTwo256" => HashType::BlakeTwo256,
            "Keccak256" => HashType::Keccak256,
            _ => HashType::Unknown,
        };
        Self(hash_type)
    }

    /// Hash the given bytes using the algorithm selected at construction time.
    fn hash(&self, s: &[u8]) -> Self::Output {
        match self.0 {
            // `Unknown` deliberately behaves like BlakeTwo256.
            HashType::BlakeTwo256 | HashType::Unknown => sp_crypto_hashing::blake2_256(s).into(),
            HashType::Keccak256 => sp_crypto_hashing::keccak_256(s).into(),
        }
    }
}
/// A generic Substrate header type, adapted from `sp_runtime::generic::Header`.
/// The block number and hasher can be configured to adapt this for other nodes.
#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
@@ -75,11 +131,12 @@ pub struct SubstrateHeader<N: Copy + Into<U256> + TryFrom<U256>, H: Hasher> {
impl<N, H> Header for SubstrateHeader<N, H>
where
N: Copy + Into<u64> + Into<U256> + TryFrom<U256> + Encode,
H: Hasher + Encode,
H: Hasher,
SubstrateHeader<N, H>: Encode + Decode,
{
type Number = N;
type Hasher = H;
fn number(&self) -> Self::Number {
self.number
}
+9 -18
View File
@@ -9,10 +9,10 @@
use super::extrinsic_params::ExtrinsicParams;
use crate::client::ClientState;
use crate::config::{ExtrinsicParamsEncoder, Header};
use crate::config::ExtrinsicParamsEncoder;
use crate::config::{Config, HashFor};
use crate::error::ExtrinsicParamsError;
use crate::utils::{Era, Static};
use crate::Config;
use alloc::borrow::ToOwned;
use alloc::boxed::Box;
use alloc::vec::Vec;
@@ -262,7 +262,7 @@ impl<T: Config> TransactionExtension<T> for CheckTxVersion {
}
/// The [`CheckGenesis`] transaction extension.
pub struct CheckGenesis<T: Config>(T::Hash);
pub struct CheckGenesis<T: Config>(HashFor<T>);
impl<T: Config> ExtrinsicParams<T> for CheckGenesis<T> {
type Params = ();
@@ -288,7 +288,7 @@ impl<T: Config> TransactionExtension<T> for CheckGenesis<T> {
/// The [`CheckMortality`] transaction extension.
pub struct CheckMortality<T: Config> {
params: CheckMortalityParamsInner<T>,
genesis_hash: T::Hash,
genesis_hash: HashFor<T>,
}
impl<T: Config> ExtrinsicParams<T> for CheckMortality<T> {
@@ -352,7 +352,7 @@ enum CheckMortalityParamsInner<T: Config> {
MortalFromBlock {
for_n_blocks: u64,
from_block_n: u64,
from_block_hash: T::Hash,
from_block_hash: HashFor<T>,
},
}
@@ -368,23 +368,14 @@ impl<T: Config> CheckMortalityParams<T> {
pub fn mortal(for_n_blocks: u64) -> Self {
Self(CheckMortalityParamsInner::MortalForBlocks(for_n_blocks))
}
/// Configure a transaction that will be mortal for the number of blocks given,
/// and from the block header provided.
pub fn mortal_from(for_n_blocks: u64, from_block: T::Header) -> Self {
Self(CheckMortalityParamsInner::MortalFromBlock {
for_n_blocks,
from_block_n: from_block.number().into(),
from_block_hash: from_block.hash(),
})
}
/// Configure a transaction that will be mortal for the number of blocks given,
/// and from the block details provided. Prefer to use [`CheckMortalityParams::mortal()`]
/// or [`CheckMortalityParams::mortal_from()`] which both avoid the block number and hash
/// from being misaligned.
/// where possible, which prevents the block number and hash from being misaligned.
pub fn mortal_from_unchecked(
for_n_blocks: u64,
from_block_n: u64,
from_block_hash: T::Hash,
from_block_hash: HashFor<T>,
) -> Self {
Self(CheckMortalityParamsInner::MortalFromBlock {
for_n_blocks,
@@ -399,7 +390,7 @@ impl<T: Config> CheckMortalityParams<T> {
}
impl<T: Config> Params<T> for CheckMortalityParams<T> {
fn inject_block(&mut self, from_block_n: u64, from_block_hash: <T as Config>::Hash) {
fn inject_block(&mut self, from_block_n: u64, from_block_hash: HashFor<T>) {
match &self.0 {
CheckMortalityParamsInner::MortalForBlocks(n) => {
self.0 = CheckMortalityParamsInner::MortalFromBlock {
+11 -7
View File
@@ -45,7 +45,11 @@ use derive_where::derive_where;
use scale_decode::{DecodeAsFields, DecodeAsType};
use subxt_metadata::PalletMetadata;
use crate::{error::MetadataError, Config, Error, Metadata};
use crate::{
config::{Config, HashFor},
error::MetadataError,
Error, Metadata,
};
/// Create a new [`Events`] instance from the given bytes.
///
@@ -232,7 +236,7 @@ pub struct EventDetails<T: Config> {
// end of everything (fields + topics)
end_idx: usize,
metadata: Metadata,
topics: Vec<T::Hash>,
topics: Vec<HashFor<T>>,
}
impl<T: Config> EventDetails<T> {
@@ -281,7 +285,7 @@ impl<T: Config> EventDetails<T> {
let event_fields_end_idx = all_bytes.len() - input.len();
// topics come after the event data in EventRecord.
let topics = Vec::<T::Hash>::decode(input)?;
let topics = Vec::<HashFor<T>>::decode(input)?;
// what bytes did we skip over in total, including topics.
let end_idx = all_bytes.len() - input.len();
@@ -413,7 +417,7 @@ impl<T: Config> EventDetails<T> {
}
/// Return the topics associated with this event.
pub fn topics(&self) -> &[T::Hash] {
pub fn topics(&self) -> &[HashFor<T>] {
&self.topics
}
}
@@ -430,7 +434,7 @@ pub struct EventMetadataDetails<'a> {
#[cfg(test)]
pub(crate) mod test_utils {
use super::*;
use crate::config::{Config, SubstrateConfig};
use crate::config::{HashFor, SubstrateConfig};
use codec::Encode;
use frame_metadata::{
v15::{
@@ -463,12 +467,12 @@ pub(crate) mod test_utils {
pub struct EventRecord<E: Encode> {
phase: Phase,
event: AllEvents<E>,
topics: Vec<<SubstrateConfig as Config>::Hash>,
topics: Vec<HashFor<SubstrateConfig>>,
}
impl<E: Encode> EventRecord<E> {
/// Create a new event record with the given phase, event, and topics.
pub fn new(phase: Phase, event: E, topics: Vec<<SubstrateConfig as Config>::Hash>) -> Self {
pub fn new(phase: Phase, event: E, topics: Vec<HashFor<SubstrateConfig>>) -> Self {
Self {
phase,
event: AllEvents::Test(event),
+10 -6
View File
@@ -9,7 +9,7 @@
//! ```rust
//! use subxt_signer::sr25519::dev;
//! use subxt_macro::subxt;
//! use subxt_core::config::PolkadotConfig;
//! use subxt_core::config::{PolkadotConfig, HashFor};
//! use subxt_core::config::DefaultExtrinsicParamsBuilder as Params;
//! use subxt_core::tx;
//! use subxt_core::utils::H256;
@@ -59,7 +59,7 @@
pub mod payload;
pub mod signer;
use crate::config::{Config, ExtrinsicParams, ExtrinsicParamsEncoder, Hasher};
use crate::config::{Config, ExtrinsicParams, ExtrinsicParamsEncoder, HashFor, Hasher};
use crate::error::{Error, ExtrinsicError, MetadataError};
use crate::metadata::Metadata;
use crate::utils::Encoded;
@@ -406,7 +406,8 @@ impl<T: Config> PartialTransactionV5<T> {
/// This represents a signed transaction that's ready to be submitted.
/// Use [`Transaction::encoded()`] or [`Transaction::into_encoded()`] to
/// get the bytes for it, or [`Transaction::hash()`] to get the hash.
/// get the bytes for it, or [`Transaction::hash_with()`] to hash the transaction
/// given an instance of [`Config::Hasher`].
pub struct Transaction<T> {
encoded: Encoded,
marker: core::marker::PhantomData<T>,
@@ -422,9 +423,12 @@ impl<T: Config> Transaction<T> {
}
}
/// Calculate and return the hash of the extrinsic, based on the configured hasher.
pub fn hash(&self) -> T::Hash {
T::Hasher::hash_of(&self.encoded)
/// Calculate and return the hash of the extrinsic, based on the provided hasher.
/// If you don't have a hasher to hand, you can construct one using the metadata
/// with `T::Hasher::new(&metadata)`. This will create a hasher suitable for the
/// current chain where possible.
pub fn hash_with(&self, hasher: T::Hasher) -> HashFor<T> {
hasher.hash_of(&self.encoded)
}
/// Returns the SCALE encoded extrinsic bytes.
+6 -4
View File
@@ -45,7 +45,7 @@ pub trait RpcConfig {
/// The block header type.
type Header: Header;
/// The block hash type.
type Hash: BlockHash;
type Hash: Hash;
/// The Account ID type.
type AccountId: AccountId;
}
@@ -55,8 +55,8 @@ pub trait Header: std::fmt::Debug + codec::Decode + serde::de::DeserializeOwned
impl<T> Header for T where T: std::fmt::Debug + codec::Decode + serde::de::DeserializeOwned {}
/// A trait which is applied to any type that is a valid block hash.
pub trait BlockHash: serde::de::DeserializeOwned + serde::Serialize {}
impl<T> BlockHash for T where T: serde::de::DeserializeOwned + serde::Serialize {}
pub trait Hash: serde::de::DeserializeOwned + serde::Serialize {}
impl<T> Hash for T where T: serde::de::DeserializeOwned + serde::Serialize {}
/// A trait which is applied to any type that is a valid Account ID.
pub trait AccountId: serde::Serialize {}
@@ -67,12 +67,14 @@ impl<T> AccountId for T where T: serde::Serialize {}
#[cfg(feature = "subxt")]
mod impl_config {
use super::*;
use subxt_core::config::HashFor;
impl<T> RpcConfig for T
where
T: subxt_core::Config,
{
type Header = T::Header;
type Hash = T::Hash;
type Hash = HashFor<T>;
type AccountId = T::AccountId;
}
}
+10 -10
View File
@@ -7,7 +7,7 @@
//! methods exposed here.
use crate::client::{rpc_params, RpcClient, RpcSubscription};
use crate::BlockHash;
use crate::Hash;
use crate::{Error, RpcConfig};
use derive_where::derive_where;
use futures::{Stream, StreamExt};
@@ -871,7 +871,7 @@ pub struct FollowSubscription<Hash> {
done: bool,
}
impl<Hash: BlockHash> FollowSubscription<Hash> {
impl<H: Hash> FollowSubscription<H> {
/// Fetch the next item in the stream.
pub async fn next(&mut self) -> Option<<Self as Stream>::Item> {
<Self as StreamExt>::next(self).await
@@ -882,8 +882,8 @@ impl<Hash: BlockHash> FollowSubscription<Hash> {
}
}
impl<Hash: BlockHash> Stream for FollowSubscription<Hash> {
type Item = <RpcSubscription<FollowEvent<Hash>> as Stream>::Item;
impl<H: Hash> Stream for FollowSubscription<H> {
type Item = <RpcSubscription<FollowEvent<H>> as Stream>::Item;
fn poll_next(
mut self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
@@ -910,15 +910,15 @@ pub struct TransactionSubscription<Hash> {
done: bool,
}
impl<Hash: BlockHash> TransactionSubscription<Hash> {
impl<H: Hash> TransactionSubscription<H> {
/// Fetch the next item in the stream.
pub async fn next(&mut self) -> Option<<Self as Stream>::Item> {
<Self as StreamExt>::next(self).await
}
}
impl<Hash: BlockHash> Stream for TransactionSubscription<Hash> {
type Item = <RpcSubscription<TransactionStatus<Hash>> as Stream>::Item;
impl<H: Hash> Stream for TransactionSubscription<H> {
type Item = <RpcSubscription<TransactionStatus<H>> as Stream>::Item;
fn poll_next(
mut self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
@@ -1031,7 +1031,7 @@ pub struct ArchiveStorageSubscription<Hash> {
done: bool,
}
impl<Hash: BlockHash> ArchiveStorageSubscription<Hash> {
impl<H: Hash> ArchiveStorageSubscription<H> {
/// Fetch the next item in the stream.
pub async fn next(&mut self) -> Option<<Self as Stream>::Item> {
<Self as StreamExt>::next(self).await
@@ -1042,8 +1042,8 @@ impl<Hash: BlockHash> ArchiveStorageSubscription<Hash> {
}
}
impl<Hash: BlockHash> Stream for ArchiveStorageSubscription<Hash> {
type Item = <RpcSubscription<ArchiveStorageEvent<Hash>> as Stream>::Item;
impl<H: Hash> Stream for ArchiveStorageSubscription<H> {
type Item = <RpcSubscription<ArchiveStorageEvent<H>> as Stream>::Item;
fn poll_next(
mut self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
+1 -2
View File
@@ -310,14 +310,13 @@ mod test {
use secp256k1::Secp256k1;
use subxt_core::utils::AccountId20;
use subxt_core::{config::*, tx::signer::Signer as SignerT, utils::H256};
use subxt_core::{config::*, tx::signer::Signer as SignerT};
use super::*;
enum StubEthRuntimeConfig {}
impl Config for StubEthRuntimeConfig {
type Hash = H256;
type AccountId = AccountId20;
type Address = AccountId20;
type Signature = Signature;
-1
View File
@@ -20,7 +20,6 @@ use runtime::runtime_types::xcm::v3::junctions::Junctions;
pub enum AssetHubConfig {}
impl Config for AssetHubConfig {
type Hash = <SubstrateConfig as Config>::Hash;
type AccountId = <SubstrateConfig as Config>::AccountId;
type Address = <PolkadotConfig as Config>::Address;
type Signature = <SubstrateConfig as Config>::Signature;
+2 -3
View File
@@ -3,7 +3,7 @@ use codec::Encode;
use subxt::client::ClientState;
use subxt::config::{
transaction_extensions::Params, Config, ExtrinsicParams, ExtrinsicParamsEncoder,
ExtrinsicParamsError,
ExtrinsicParamsError, HashFor,
};
use subxt_signer::sr25519::dev;
@@ -15,7 +15,6 @@ pub mod runtime {}
pub enum CustomConfig {}
impl Config for CustomConfig {
type Hash = subxt::utils::H256;
type AccountId = subxt::utils::AccountId32;
type Address = subxt::utils::MultiAddress<Self::AccountId, ()>;
type Signature = subxt::utils::MultiSignature;
@@ -28,7 +27,7 @@ impl Config for CustomConfig {
// This represents some arbitrary (and nonsensical) custom parameters that
// will be attached to transaction extra and additional payloads:
pub struct CustomExtrinsicParams<T: Config> {
genesis_hash: T::Hash,
genesis_hash: HashFor<T>,
tip: u128,
foo: bool,
}
@@ -19,7 +19,6 @@ pub mod runtime {}
pub enum CustomConfig {}
impl Config for CustomConfig {
type Hash = subxt::utils::H256;
type AccountId = subxt::utils::AccountId32;
type Address = subxt::utils::MultiAddress<Self::AccountId, ()>;
type Signature = subxt::utils::MultiSignature;
-1
View File
@@ -15,7 +15,6 @@ mod eth_runtime {}
enum EthRuntimeConfig {}
impl subxt::Config for EthRuntimeConfig {
type Hash = subxt::utils::H256;
type AccountId = AccountId20;
type Address = AccountId20;
type Signature = Signature;
@@ -2,7 +2,7 @@
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
// see LICENSE for license details.
use crate::config::Config;
use crate::config::{Config, HashFor};
use crate::error::Error;
use futures::{FutureExt, Stream, StreamExt, TryStreamExt};
use std::future::Future;
@@ -99,7 +99,7 @@ impl<Hash> FollowStream<Hash> {
}
/// Create a new [`FollowStream`] given the RPC methods.
pub fn from_methods<T: Config>(methods: ChainHeadRpcMethods<T>) -> FollowStream<T::Hash> {
pub fn from_methods<T: Config>(methods: ChainHeadRpcMethods<T>) -> FollowStream<HashFor<T>> {
FollowStream {
stream_getter: Box::new(move || {
let methods = methods.clone();
@@ -115,7 +115,7 @@ impl<Hash> FollowStream<Hash> {
};
// Map stream errors into the higher level subxt one:
let stream = stream.map_err(|e| e.into());
let stream: FollowEventStream<T::Hash> = Box::pin(stream);
let stream: FollowEventStream<HashFor<T>> = Box::pin(stream);
// Return both:
Ok((stream, sub_id))
})
@@ -3,7 +3,7 @@
// see LICENSE for license details.
use super::follow_stream_unpin::{BlockRef, FollowStreamMsg, FollowStreamUnpin};
use crate::config::BlockHash;
use crate::config::Hash;
use crate::error::{Error, RpcError};
use futures::stream::{Stream, StreamExt};
use std::collections::{HashMap, HashSet, VecDeque};
@@ -18,15 +18,15 @@ use subxt_rpcs::methods::chain_head::{FollowEvent, Initialized, RuntimeEvent};
/// blocks since then, as if they were each creating a unique `chainHead_follow` subscription). This
/// is the "top" layer of our follow stream subscriptions, and the one that's interacted with elsewhere.
#[derive(Debug)]
pub struct FollowStreamDriver<Hash: BlockHash> {
inner: FollowStreamUnpin<Hash>,
shared: Shared<Hash>,
pub struct FollowStreamDriver<H: Hash> {
inner: FollowStreamUnpin<H>,
shared: Shared<H>,
}
impl<Hash: BlockHash> FollowStreamDriver<Hash> {
impl<H: Hash> FollowStreamDriver<H> {
/// Create a new [`FollowStreamDriver`]. This must be polled by some executor
/// in order for any progress to be made. Things can subscribe to events.
pub fn new(follow_unpin: FollowStreamUnpin<Hash>) -> Self {
pub fn new(follow_unpin: FollowStreamUnpin<H>) -> Self {
Self {
inner: follow_unpin,
shared: Shared::default(),
@@ -34,14 +34,14 @@ impl<Hash: BlockHash> FollowStreamDriver<Hash> {
}
/// Return a handle from which we can create new subscriptions to follow events.
pub fn handle(&self) -> FollowStreamDriverHandle<Hash> {
pub fn handle(&self) -> FollowStreamDriverHandle<H> {
FollowStreamDriverHandle {
shared: self.shared.clone(),
}
}
}
impl<Hash: BlockHash> Stream for FollowStreamDriver<Hash> {
impl<H: Hash> Stream for FollowStreamDriver<H> {
type Item = Result<(), Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
@@ -65,13 +65,13 @@ impl<Hash: BlockHash> Stream for FollowStreamDriver<Hash> {
/// A handle that can be used to create subscribers, but that doesn't
/// itself subscribe to events.
#[derive(Debug, Clone)]
pub struct FollowStreamDriverHandle<Hash: BlockHash> {
shared: Shared<Hash>,
pub struct FollowStreamDriverHandle<H: Hash> {
shared: Shared<H>,
}
impl<Hash: BlockHash> FollowStreamDriverHandle<Hash> {
impl<H: Hash> FollowStreamDriverHandle<H> {
/// Subscribe to follow events.
pub fn subscribe(&self) -> FollowStreamDriverSubscription<Hash> {
pub fn subscribe(&self) -> FollowStreamDriverSubscription<H> {
self.shared.subscribe()
}
}
@@ -82,15 +82,15 @@ impl<Hash: BlockHash> FollowStreamDriverHandle<Hash> {
/// runtime information, and then any new/best block events and so on received since
/// the latest finalized block.
#[derive(Debug)]
pub struct FollowStreamDriverSubscription<Hash: BlockHash> {
pub struct FollowStreamDriverSubscription<H: Hash> {
id: usize,
done: bool,
shared: Shared<Hash>,
local_items: VecDeque<FollowStreamMsg<BlockRef<Hash>>>,
shared: Shared<H>,
local_items: VecDeque<FollowStreamMsg<BlockRef<H>>>,
}
impl<Hash: BlockHash> Stream for FollowStreamDriverSubscription<Hash> {
type Item = FollowStreamMsg<BlockRef<Hash>>;
impl<H: Hash> Stream for FollowStreamDriverSubscription<H> {
type Item = FollowStreamMsg<BlockRef<H>>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
if self.done {
@@ -122,7 +122,7 @@ impl<Hash: BlockHash> Stream for FollowStreamDriverSubscription<Hash> {
}
}
impl<Hash: BlockHash> FollowStreamDriverSubscription<Hash> {
impl<H: Hash> FollowStreamDriverSubscription<H> {
/// Return the current subscription ID. If the subscription has stopped, then this will
/// wait until a new subscription has started with a new ID.
pub async fn subscription_id(self) -> Option<String> {
@@ -138,18 +138,18 @@ impl<Hash: BlockHash> FollowStreamDriverSubscription<Hash> {
}
/// Subscribe to the follow events, ignoring any other messages.
pub fn events(self) -> impl Stream<Item = FollowEvent<BlockRef<Hash>>> + Send + Sync {
pub fn events(self) -> impl Stream<Item = FollowEvent<BlockRef<H>>> + Send + Sync {
self.filter_map(|ev| std::future::ready(ev.into_event()))
}
}
impl<Hash: BlockHash> Clone for FollowStreamDriverSubscription<Hash> {
impl<H: Hash> Clone for FollowStreamDriverSubscription<H> {
fn clone(&self) -> Self {
self.shared.subscribe()
}
}
impl<Hash: BlockHash> Drop for FollowStreamDriverSubscription<Hash> {
impl<H: Hash> Drop for FollowStreamDriverSubscription<H> {
fn drop(&mut self) {
self.shared.remove_sub(self.id);
}
@@ -159,25 +159,25 @@ impl<Hash: BlockHash> Drop for FollowStreamDriverSubscription<Hash> {
/// events to any subscribers, and subscribers will access it to pull the
/// events destined for themselves.
#[derive(Debug, Clone)]
struct Shared<Hash: BlockHash>(Arc<Mutex<SharedState<Hash>>>);
struct Shared<H: Hash>(Arc<Mutex<SharedState<H>>>);
#[derive(Debug)]
struct SharedState<Hash: BlockHash> {
struct SharedState<H: Hash> {
done: bool,
next_id: usize,
subscribers: HashMap<usize, SubscriberDetails<Hash>>,
subscribers: HashMap<usize, SubscriberDetails<H>>,
/// Keep a buffer of all events that should be handed to a new subscription.
block_events_for_new_subscriptions: VecDeque<FollowEvent<BlockRef<Hash>>>,
block_events_for_new_subscriptions: VecDeque<FollowEvent<BlockRef<H>>>,
// Keep track of the subscription ID we send out on new subs.
current_subscription_id: Option<String>,
// Keep track of the init message we send out on new subs.
current_init_message: Option<Initialized<BlockRef<Hash>>>,
current_init_message: Option<Initialized<BlockRef<H>>>,
// Runtime events by block hash; we need to track these to know
// whether the runtime has changed when we see a finalized block notification.
seen_runtime_events: HashMap<Hash, RuntimeEvent>,
seen_runtime_events: HashMap<H, RuntimeEvent>,
}
impl<Hash: BlockHash> Default for Shared<Hash> {
impl<H: Hash> Default for Shared<H> {
fn default() -> Self {
Shared(Arc::new(Mutex::new(SharedState {
next_id: 1,
@@ -191,7 +191,7 @@ impl<Hash: BlockHash> Default for Shared<Hash> {
}
}
impl<Hash: BlockHash> Shared<Hash> {
impl<H: Hash> Shared<H> {
/// Set the shared state to "done"; no more items will be handed to it.
pub fn done(&self) {
let mut shared = self.0.lock().unwrap();
@@ -216,7 +216,7 @@ impl<Hash: BlockHash> Shared<Hash> {
&self,
sub_id: usize,
waker: &Waker,
) -> Option<VecDeque<FollowStreamMsg<BlockRef<Hash>>>> {
) -> Option<VecDeque<FollowStreamMsg<BlockRef<H>>>> {
let mut shared = self.0.lock().unwrap();
let is_done = shared.done;
@@ -236,7 +236,7 @@ impl<Hash: BlockHash> Shared<Hash> {
}
/// Push a new item out to subscribers.
pub fn push_item(&self, item: FollowStreamMsg<BlockRef<Hash>>) {
pub fn push_item(&self, item: FollowStreamMsg<BlockRef<H>>) {
let mut shared = self.0.lock().unwrap();
let shared = shared.deref_mut();
@@ -289,7 +289,7 @@ impl<Hash: BlockHash> Shared<Hash> {
// the state at the head of the chain, therefore it is correct to remove those as well.
// Idem for the pruned hashes; they will never be reported again and we remove
// them from the window of events.
let to_remove: HashSet<Hash> = finalized_ev
let to_remove: HashSet<H> = finalized_ev
.finalized_block_hashes
.iter()
.chain(finalized_ev.pruned_block_hashes.iter())
@@ -337,7 +337,7 @@ impl<Hash: BlockHash> Shared<Hash> {
}
/// Create a new subscription.
pub fn subscribe(&self) -> FollowStreamDriverSubscription<Hash> {
pub fn subscribe(&self) -> FollowStreamDriverSubscription<H> {
let mut shared = self.0.lock().unwrap();
let id = shared.next_id;
@@ -382,30 +382,30 @@ impl<Hash: BlockHash> Shared<Hash> {
/// Details for a given subscriber: any items it's not yet claimed,
/// and a way to wake it up when there are more items for it.
#[derive(Debug)]
struct SubscriberDetails<Hash: BlockHash> {
items: VecDeque<FollowStreamMsg<BlockRef<Hash>>>,
struct SubscriberDetails<H: Hash> {
items: VecDeque<FollowStreamMsg<BlockRef<H>>>,
waker: Option<Waker>,
}
/// A stream that subscribes to finalized blocks
/// and indicates whether a block was missed if was restarted.
#[derive(Debug)]
pub struct FollowStreamFinalizedHeads<Hash: BlockHash, F> {
stream: FollowStreamDriverSubscription<Hash>,
pub struct FollowStreamFinalizedHeads<H: Hash, F> {
stream: FollowStreamDriverSubscription<H>,
sub_id: Option<String>,
last_seen_block: Option<BlockRef<Hash>>,
last_seen_block: Option<BlockRef<H>>,
f: F,
is_done: bool,
}
impl<Hash: BlockHash, F> Unpin for FollowStreamFinalizedHeads<Hash, F> {}
impl<H: Hash, F> Unpin for FollowStreamFinalizedHeads<H, F> {}
impl<Hash, F> FollowStreamFinalizedHeads<Hash, F>
impl<H, F> FollowStreamFinalizedHeads<H, F>
where
Hash: BlockHash,
F: Fn(FollowEvent<BlockRef<Hash>>) -> Vec<BlockRef<Hash>>,
H: Hash,
F: Fn(FollowEvent<BlockRef<H>>) -> Vec<BlockRef<H>>,
{
pub fn new(stream: FollowStreamDriverSubscription<Hash>, f: F) -> Self {
pub fn new(stream: FollowStreamDriverSubscription<H>, f: F) -> Self {
Self {
stream,
sub_id: None,
@@ -416,12 +416,12 @@ where
}
}
impl<Hash, F> Stream for FollowStreamFinalizedHeads<Hash, F>
impl<H, F> Stream for FollowStreamFinalizedHeads<H, F>
where
Hash: BlockHash,
F: Fn(FollowEvent<BlockRef<Hash>>) -> Vec<BlockRef<Hash>>,
H: Hash,
F: Fn(FollowEvent<BlockRef<H>>) -> Vec<BlockRef<H>>,
{
type Item = Result<(String, Vec<BlockRef<Hash>>), Error>;
type Item = Result<(String, Vec<BlockRef<H>>), Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
if self.is_done {
@@ -493,14 +493,14 @@ mod test_utils {
use super::*;
/// Return a `FollowStreamDriver`
pub fn test_follow_stream_driver_getter<Hash, F, I>(
pub fn test_follow_stream_driver_getter<H, F, I>(
events: F,
max_life: usize,
) -> FollowStreamDriver<Hash>
) -> FollowStreamDriver<H>
where
Hash: BlockHash + 'static,
H: Hash + 'static,
F: Fn() -> I + Send + 'static,
I: IntoIterator<Item = Result<FollowEvent<Hash>, Error>>,
I: IntoIterator<Item = Result<FollowEvent<H>, Error>>,
{
let (stream, _) = test_unpin_stream_getter(events, max_life);
FollowStreamDriver::new(stream)
@@ -4,7 +4,7 @@
use super::follow_stream::FollowStream;
use super::ChainHeadRpcMethods;
use crate::config::{BlockHash, Config};
use crate::config::{Config, Hash, HashFor};
use crate::error::Error;
use futures::stream::{FuturesUnordered, Stream, StreamExt};
use subxt_rpcs::methods::chain_head::{
@@ -27,11 +27,11 @@ pub use super::follow_stream::FollowStreamMsg;
/// result). Put simply, it tries to keep every block pinned as long as possible until the block is no longer
/// used anywhere.
#[derive(Debug)]
pub struct FollowStreamUnpin<Hash: BlockHash> {
pub struct FollowStreamUnpin<H: Hash> {
// The underlying stream of events.
inner: FollowStream<Hash>,
inner: FollowStream<H>,
// A method to call to unpin a block, given a block hash and a subscription ID.
unpin_method: UnpinMethodHolder<Hash>,
unpin_method: UnpinMethodHolder<H>,
// Futures for sending unpin events that we'll poll to completion as
// part of polling the stream as a whole.
unpin_futs: FuturesUnordered<UnpinFut>,
@@ -46,14 +46,14 @@ pub struct FollowStreamUnpin<Hash: BlockHash> {
// The longest period a block can be pinned for.
max_block_life: usize,
// The currently seen and pinned blocks.
pinned: HashMap<Hash, PinnedDetails<Hash>>,
pinned: HashMap<H, PinnedDetails<H>>,
// Shared state about blocks we've flagged to unpin from elsewhere
unpin_flags: UnpinFlags<Hash>,
unpin_flags: UnpinFlags<H>,
}
// Just a wrapper to make implementing debug on the whole thing easier.
struct UnpinMethodHolder<Hash>(UnpinMethod<Hash>);
impl<Hash> std::fmt::Debug for UnpinMethodHolder<Hash> {
struct UnpinMethodHolder<H>(UnpinMethod<H>);
impl<H> std::fmt::Debug for UnpinMethodHolder<H> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
@@ -63,15 +63,15 @@ impl<Hash> std::fmt::Debug for UnpinMethodHolder<Hash> {
}
/// The type of the unpin method that we need to provide.
pub type UnpinMethod<Hash> = Box<dyn FnMut(Hash, Arc<str>) -> UnpinFut + Send>;
pub type UnpinMethod<H> = Box<dyn FnMut(H, Arc<str>) -> UnpinFut + Send>;
/// The future returned from [`UnpinMethod`].
pub type UnpinFut = Pin<Box<dyn Future<Output = ()> + Send + 'static>>;
impl<Hash: BlockHash> std::marker::Unpin for FollowStreamUnpin<Hash> {}
impl<H: Hash> std::marker::Unpin for FollowStreamUnpin<H> {}
impl<Hash: BlockHash> Stream for FollowStreamUnpin<Hash> {
type Item = Result<FollowStreamMsg<BlockRef<Hash>>, Error>;
impl<H: Hash> Stream for FollowStreamUnpin<H> {
type Item = Result<FollowStreamMsg<BlockRef<H>>, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let mut this = self.as_mut();
@@ -253,11 +253,11 @@ impl<Hash: BlockHash> Stream for FollowStreamUnpin<Hash> {
}
}
impl<Hash: BlockHash> FollowStreamUnpin<Hash> {
impl<H: Hash> FollowStreamUnpin<H> {
/// Create a new [`FollowStreamUnpin`].
pub fn new(
follow_stream: FollowStream<Hash>,
unpin_method: UnpinMethod<Hash>,
follow_stream: FollowStream<H>,
unpin_method: UnpinMethod<H>,
max_block_life: usize,
) -> Self {
Self {
@@ -274,11 +274,11 @@ impl<Hash: BlockHash> FollowStreamUnpin<Hash> {
/// Create a new [`FollowStreamUnpin`] given the RPC methods.
pub fn from_methods<T: Config>(
follow_stream: FollowStream<T::Hash>,
follow_stream: FollowStream<HashFor<T>>,
methods: ChainHeadRpcMethods<T>,
max_block_life: usize,
) -> FollowStreamUnpin<T::Hash> {
let unpin_method = Box::new(move |hash: T::Hash, sub_id: Arc<str>| {
) -> FollowStreamUnpin<HashFor<T>> {
let unpin_method = Box::new(move |hash: HashFor<T>, sub_id: Arc<str>| {
let methods = methods.clone();
let fut: UnpinFut = Box::pin(async move {
// We ignore any errors trying to unpin at the moment.
@@ -291,14 +291,14 @@ impl<Hash: BlockHash> FollowStreamUnpin<Hash> {
}
/// Is the block hash currently pinned.
pub fn is_pinned(&self, hash: &Hash) -> bool {
pub fn is_pinned(&self, hash: &H) -> bool {
self.pinned.contains_key(hash)
}
/// Pin a block, or return the reference to an already-pinned block. If the block has been registered to
/// be unpinned, we'll clear those flags, so that it won't be unpinned. If the unpin request has already
/// been sent though, then the block will be unpinned.
fn pin_block_at(&mut self, rel_block_age: usize, hash: Hash) -> BlockRef<Hash> {
fn pin_block_at(&mut self, rel_block_age: usize, hash: H) -> BlockRef<H> {
self.pin_block_at_setting_unpinnable_flag(rel_block_age, hash, false)
}
@@ -306,16 +306,16 @@ impl<Hash: BlockHash> FollowStreamUnpin<Hash> {
///
/// This is the same as [`Self::pin_block_at`], except that it also marks the block as being unpinnable now,
/// which should be done for any block that will no longer be seen in future events.
fn pin_unpinnable_block_at(&mut self, rel_block_age: usize, hash: Hash) -> BlockRef<Hash> {
fn pin_unpinnable_block_at(&mut self, rel_block_age: usize, hash: H) -> BlockRef<H> {
self.pin_block_at_setting_unpinnable_flag(rel_block_age, hash, true)
}
fn pin_block_at_setting_unpinnable_flag(
&mut self,
rel_block_age: usize,
hash: Hash,
hash: H,
can_be_unpinned: bool,
) -> BlockRef<Hash> {
) -> BlockRef<H> {
let entry = self
.pinned
.entry(hash)
@@ -390,10 +390,10 @@ impl<Hash: BlockHash> FollowStreamUnpin<Hash> {
// The set of block hashes that can be unpinned when ready.
// BlockRefs write to this when they are dropped.
type UnpinFlags<Hash> = Arc<Mutex<HashSet<Hash>>>;
type UnpinFlags<H> = Arc<Mutex<HashSet<H>>>;
#[derive(Debug)]
struct PinnedDetails<Hash: BlockHash> {
struct PinnedDetails<H: Hash> {
/// Relatively speaking, how old is the block? When we start following
/// blocks, the first finalized block gets an age of 0, the second an age
/// of 1 and so on.
@@ -401,7 +401,7 @@ struct PinnedDetails<Hash: BlockHash> {
/// A block ref we can hand out to keep blocks pinned.
/// Because we store one here until it's unpinned, the live count
/// will only drop to 1 when no external refs are left.
block_ref: BlockRef<Hash>,
block_ref: BlockRef<H>,
/// Has this block showed up in the list of pruned blocks, or has it
/// been finalized? In this case, it can now been pinned as it won't
/// show up again in future events (except as a "parent block" of some
@@ -411,21 +411,21 @@ struct PinnedDetails<Hash: BlockHash> {
/// All blocks reported will be wrapped in this.
#[derive(Debug, Clone)]
pub struct BlockRef<Hash: BlockHash> {
inner: Arc<BlockRefInner<Hash>>,
pub struct BlockRef<H: Hash> {
inner: Arc<BlockRefInner<H>>,
}
#[derive(Debug)]
struct BlockRefInner<Hash> {
hash: Hash,
unpin_flags: UnpinFlags<Hash>,
struct BlockRefInner<H> {
hash: H,
unpin_flags: UnpinFlags<H>,
}
impl<Hash: BlockHash> BlockRef<Hash> {
impl<H: Hash> BlockRef<H> {
/// For testing purposes only, create a BlockRef from a hash
/// that isn't pinned.
#[cfg(test)]
pub fn new(hash: Hash) -> Self {
pub fn new(hash: H) -> Self {
BlockRef {
inner: Arc::new(BlockRefInner {
hash,
@@ -435,24 +435,24 @@ impl<Hash: BlockHash> BlockRef<Hash> {
}
/// Return the hash for this block.
pub fn hash(&self) -> Hash {
pub fn hash(&self) -> H {
self.inner.hash
}
}
impl<Hash: BlockHash> PartialEq for BlockRef<Hash> {
impl<H: Hash> PartialEq for BlockRef<H> {
fn eq(&self, other: &Self) -> bool {
self.inner.hash == other.inner.hash
}
}
impl<Hash: BlockHash> PartialEq<Hash> for BlockRef<Hash> {
fn eq(&self, other: &Hash) -> bool {
impl<H: Hash> PartialEq<H> for BlockRef<H> {
fn eq(&self, other: &H) -> bool {
&self.inner.hash == other
}
}
impl<Hash: BlockHash> Drop for BlockRef<Hash> {
impl<H: Hash> Drop for BlockRef<H> {
fn drop(&mut self) {
// PinnedDetails keeps one ref, so if this is the second ref, it's the
// only "external" one left and we should ask to unpin it now. if it's
@@ -472,23 +472,23 @@ pub(super) mod test_utils {
use super::*;
use crate::config::substrate::H256;
pub type UnpinRx<Hash> = std::sync::mpsc::Receiver<(Hash, Arc<str>)>;
pub type UnpinRx<H> = std::sync::mpsc::Receiver<(H, Arc<str>)>;
/// Get a [`FollowStreamUnpin`] from an iterator over events.
pub fn test_unpin_stream_getter<Hash, F, I>(
pub fn test_unpin_stream_getter<H, F, I>(
events: F,
max_life: usize,
) -> (FollowStreamUnpin<Hash>, UnpinRx<Hash>)
) -> (FollowStreamUnpin<H>, UnpinRx<H>)
where
Hash: BlockHash + 'static,
H: Hash + 'static,
F: Fn() -> I + Send + 'static,
I: IntoIterator<Item = Result<FollowEvent<Hash>, Error>>,
I: IntoIterator<Item = Result<FollowEvent<H>, Error>>,
{
// Unpin requests will come here so that we can look out for them.
let (unpin_tx, unpin_rx) = std::sync::mpsc::channel();
let follow_stream = FollowStream::new(test_stream_getter(events));
let unpin_method: UnpinMethod<Hash> = Box::new(move |hash, sub_id| {
let unpin_method: UnpinMethod<H> = Box::new(move |hash, sub_id| {
unpin_tx.send((hash, sub_id)).unwrap();
Box::pin(std::future::ready(()))
});
@@ -498,11 +498,11 @@ pub(super) mod test_utils {
}
/// Assert that the unpinned blocks sent from the `UnpinRx` channel match the items given.
pub fn assert_from_unpin_rx<Hash: BlockHash + 'static>(
unpin_rx: &UnpinRx<Hash>,
items: impl IntoIterator<Item = Hash>,
pub fn assert_from_unpin_rx<H: Hash + 'static>(
unpin_rx: &UnpinRx<H>,
items: impl IntoIterator<Item = H>,
) {
let expected_hashes = HashSet::<Hash>::from_iter(items);
let expected_hashes = HashSet::<H>::from_iter(items);
for i in 0..expected_hashes.len() {
let Ok((hash, _)) = unpin_rx.try_recv() else {
panic!("Another unpin event is expected, but failed to pull item {i} from channel");
+39 -36
View File
@@ -21,9 +21,8 @@ use crate::backend::{
utils::retry, Backend, BlockRef, BlockRefT, RuntimeVersion, StorageResponse, StreamOf,
StreamOfResults, TransactionStatus,
};
use crate::config::BlockHash;
use crate::config::{Config, Hash, HashFor};
use crate::error::{Error, RpcError};
use crate::Config;
use async_trait::async_trait;
use follow_stream_driver::{FollowStreamDriver, FollowStreamDriverHandle};
use futures::future::Either;
@@ -130,12 +129,13 @@ impl<T: Config> ChainHeadBackendBuilder<T> {
// Construct the underlying follow_stream layers:
let rpc_methods = ChainHeadRpcMethods::new(client.into());
let follow_stream =
follow_stream::FollowStream::<T::Hash>::from_methods(rpc_methods.clone());
let follow_stream_unpin = follow_stream_unpin::FollowStreamUnpin::<T::Hash>::from_methods(
follow_stream,
rpc_methods.clone(),
self.max_block_life,
);
follow_stream::FollowStream::<HashFor<T>>::from_methods(rpc_methods.clone());
let follow_stream_unpin =
follow_stream_unpin::FollowStreamUnpin::<HashFor<T>>::from_methods(
follow_stream,
rpc_methods.clone(),
self.max_block_life,
);
let follow_stream_driver = FollowStreamDriver::new(follow_stream_unpin);
// Wrap these into the backend and driver that we'll expose.
@@ -193,11 +193,11 @@ impl<T: Config> ChainHeadBackendBuilder<T> {
/// backend to make progress.
#[derive(Debug)]
pub struct ChainHeadBackendDriver<T: Config> {
driver: FollowStreamDriver<T::Hash>,
driver: FollowStreamDriver<HashFor<T>>,
}
impl<T: Config> Stream for ChainHeadBackendDriver<T> {
type Item = <FollowStreamDriver<T::Hash> as Stream>::Item;
type Item = <FollowStreamDriver<HashFor<T>> as Stream>::Item;
fn poll_next(
mut self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
@@ -212,7 +212,7 @@ pub struct ChainHeadBackend<T: Config> {
// RPC methods we'll want to call:
methods: ChainHeadRpcMethods<T>,
// A handle to the chainHead_follow subscription:
follow_handle: FollowStreamDriverHandle<T::Hash>,
follow_handle: FollowStreamDriverHandle<HashFor<T>>,
// How long to wait until giving up on transactions:
transaction_timeout_secs: usize,
// Don't synchronise blocks with chainHead_follow when submitting txs:
@@ -229,11 +229,11 @@ impl<T: Config> ChainHeadBackend<T> {
async fn stream_headers<F>(
&self,
f: F,
) -> Result<StreamOfResults<(T::Header, BlockRef<T::Hash>)>, Error>
) -> Result<StreamOfResults<(T::Header, BlockRef<HashFor<T>>)>, Error>
where
F: Fn(
FollowEvent<follow_stream_unpin::BlockRef<T::Hash>>,
) -> Vec<follow_stream_unpin::BlockRef<T::Hash>>
FollowEvent<follow_stream_unpin::BlockRef<HashFor<T>>>,
) -> Vec<follow_stream_unpin::BlockRef<HashFor<T>>>
+ Send
+ Sync
+ 'static,
@@ -275,9 +275,9 @@ impl<T: Config> ChainHeadBackend<T> {
}
}
impl<Hash: BlockHash + 'static> BlockRefT for follow_stream_unpin::BlockRef<Hash> {}
impl<Hash: BlockHash + 'static> From<follow_stream_unpin::BlockRef<Hash>> for BlockRef<Hash> {
fn from(b: follow_stream_unpin::BlockRef<Hash>) -> Self {
impl<H: Hash + 'static> BlockRefT for follow_stream_unpin::BlockRef<H> {}
impl<H: Hash + 'static> From<follow_stream_unpin::BlockRef<H>> for BlockRef<H> {
fn from(b: follow_stream_unpin::BlockRef<H>) -> Self {
BlockRef::new(b.hash(), b)
}
}
@@ -289,7 +289,7 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for ChainHeadBackend<T> {
async fn storage_fetch_values(
&self,
keys: Vec<Vec<u8>>,
at: T::Hash,
at: HashFor<T>,
) -> Result<StreamOfResults<StorageResponse>, Error> {
retry(|| async {
let queries = keys.iter().map(|key| StorageQuery {
@@ -324,7 +324,7 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for ChainHeadBackend<T> {
async fn storage_fetch_descendant_keys(
&self,
key: Vec<u8>,
at: T::Hash,
at: HashFor<T>,
) -> Result<StreamOfResults<Vec<u8>>, Error> {
retry(|| async {
// Ask for hashes, and then just ignore them and return the keys that come back.
@@ -350,7 +350,7 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for ChainHeadBackend<T> {
async fn storage_fetch_descendant_values(
&self,
key: Vec<u8>,
at: T::Hash,
at: HashFor<T>,
) -> Result<StreamOfResults<StorageResponse>, Error> {
retry(|| async {
let query = StorageQuery {
@@ -386,7 +386,7 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for ChainHeadBackend<T> {
.await
}
async fn genesis_hash(&self) -> Result<T::Hash, Error> {
async fn genesis_hash(&self) -> Result<HashFor<T>, Error> {
retry(|| async {
let genesis_hash = self.methods.chainspec_v1_genesis_hash().await?;
Ok(genesis_hash)
@@ -394,7 +394,7 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for ChainHeadBackend<T> {
.await
}
async fn block_header(&self, at: T::Hash) -> Result<Option<T::Header>, Error> {
async fn block_header(&self, at: HashFor<T>) -> Result<Option<T::Header>, Error> {
retry(|| async {
let sub_id = get_subscription_id(&self.follow_handle).await?;
let header = self.methods.chainhead_v1_header(&sub_id, at).await?;
@@ -403,7 +403,7 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for ChainHeadBackend<T> {
.await
}
async fn block_body(&self, at: T::Hash) -> Result<Option<Vec<Vec<u8>>>, Error> {
async fn block_body(&self, at: HashFor<T>) -> Result<Option<Vec<Vec<u8>>>, Error> {
retry(|| async {
let sub_id = get_subscription_id(&self.follow_handle).await?;
@@ -432,8 +432,8 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for ChainHeadBackend<T> {
.await
}
async fn latest_finalized_block_ref(&self) -> Result<BlockRef<T::Hash>, Error> {
let next_ref: Option<BlockRef<T::Hash>> = self
async fn latest_finalized_block_ref(&self) -> Result<BlockRef<HashFor<T>>, Error> {
let next_ref: Option<BlockRef<HashFor<T>>> = self
.follow_handle
.subscribe()
.events()
@@ -543,7 +543,8 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for ChainHeadBackend<T> {
async fn stream_all_block_headers(
&self,
) -> Result<StreamOfResults<(T::Header, BlockRef<T::Hash>)>, Error> {
_hasher: T::Hasher,
) -> Result<StreamOfResults<(T::Header, BlockRef<HashFor<T>>)>, Error> {
// TODO: https://github.com/paritytech/subxt/issues/1568
//
// It's possible that blocks may be silently missed if
@@ -560,7 +561,8 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for ChainHeadBackend<T> {
async fn stream_best_block_headers(
&self,
) -> Result<StreamOfResults<(T::Header, BlockRef<T::Hash>)>, Error> {
_hasher: T::Hasher,
) -> Result<StreamOfResults<(T::Header, BlockRef<HashFor<T>>)>, Error> {
// TODO: https://github.com/paritytech/subxt/issues/1568
//
// It's possible that blocks may be silently missed if
@@ -575,7 +577,8 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for ChainHeadBackend<T> {
async fn stream_finalized_block_headers(
&self,
) -> Result<StreamOfResults<(T::Header, BlockRef<T::Hash>)>, Error> {
_hasher: T::Hasher,
) -> Result<StreamOfResults<(T::Header, BlockRef<HashFor<T>>)>, Error> {
self.stream_headers(|ev| match ev {
FollowEvent::Initialized(init) => init.finalized_block_hashes,
FollowEvent::Finalized(ev) => ev.finalized_block_hashes,
@@ -587,12 +590,12 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for ChainHeadBackend<T> {
async fn submit_transaction(
&self,
extrinsic: &[u8],
) -> Result<StreamOfResults<TransactionStatus<T::Hash>>, Error> {
) -> Result<StreamOfResults<TransactionStatus<HashFor<T>>>, Error> {
// Submit a transaction. This makes no attempt to sync with follow events,
async fn submit_transaction_ignoring_follow_events<T: Config>(
extrinsic: &[u8],
methods: &ChainHeadRpcMethods<T>,
) -> Result<StreamOfResults<TransactionStatus<T::Hash>>, Error> {
) -> Result<StreamOfResults<TransactionStatus<HashFor<T>>>, Error> {
let tx_progress = methods
.transactionwatch_v1_submit_and_watch(extrinsic)
.await?
@@ -633,8 +636,8 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for ChainHeadBackend<T> {
extrinsic: &[u8],
transaction_timeout_secs: u64,
methods: &ChainHeadRpcMethods<T>,
follow_handle: &FollowStreamDriverHandle<T::Hash>,
) -> Result<StreamOfResults<TransactionStatus<T::Hash>>, Error> {
follow_handle: &FollowStreamDriverHandle<HashFor<T>>,
) -> Result<StreamOfResults<TransactionStatus<HashFor<T>>>, Error> {
// We care about new and finalized block hashes.
enum SeenBlockMarker {
New,
@@ -655,7 +658,7 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for ChainHeadBackend<T> {
// If we see the finalized event, we start waiting until we find a finalized block that
// matches, so we can guarantee to return a pinned block hash and be properly in sync
// with chainHead_follow.
let mut finalized_hash: Option<T::Hash> = None;
let mut finalized_hash: Option<HashFor<T>> = None;
// Record the start time so that we can time out if things appear to take too long.
let start_instant = web_time::Instant::now();
@@ -818,7 +821,7 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for ChainHeadBackend<T> {
&self,
method: &str,
call_parameters: Option<&[u8]>,
at: T::Hash,
at: HashFor<T>,
) -> Result<Vec<u8>, Error> {
retry(|| async {
let sub_id = get_subscription_id(&self.follow_handle).await?;
@@ -856,8 +859,8 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for ChainHeadBackend<T> {
}
/// A helper to obtain a subscription ID.
async fn get_subscription_id<Hash: BlockHash>(
follow_handle: &FollowStreamDriverHandle<Hash>,
async fn get_subscription_id<H: Hash>(
follow_handle: &FollowStreamDriverHandle<H>,
) -> Result<String, Error> {
let Some(sub_id) = follow_handle.subscribe().subscription_id().await else {
return Err(RpcError::SubscriptionDropped.into());
@@ -4,7 +4,7 @@
use super::follow_stream_driver::FollowStreamDriverHandle;
use super::follow_stream_unpin::BlockRef;
use crate::config::Config;
use crate::config::{Config, HashFor};
use crate::error::{Error, RpcError};
use futures::{FutureExt, Stream, StreamExt};
use std::collections::VecDeque;
@@ -24,7 +24,7 @@ pub struct StorageItems<T: Config> {
buffered_responses: VecDeque<StorageResult>,
continue_call: ContinueFutGetter,
continue_fut: Option<ContinueFut>,
follow_event_stream: FollowEventStream<T::Hash>,
follow_event_stream: FollowEventStream<HashFor<T>>,
}
impl<T: Config> StorageItems<T> {
@@ -33,8 +33,8 @@ impl<T: Config> StorageItems<T> {
// needed, and stop when done.
pub async fn from_methods(
queries: impl Iterator<Item = StorageQuery<&[u8]>>,
at: T::Hash,
follow_handle: &FollowStreamDriverHandle<T::Hash>,
at: HashFor<T>,
follow_handle: &FollowStreamDriverHandle<HashFor<T>>,
methods: ChainHeadRpcMethods<T>,
) -> Result<Self, Error> {
let sub_id = super::get_subscription_id(follow_handle).await?;
@@ -76,7 +76,7 @@ impl<T: Config> StorageItems<T> {
fn new(
operation_id: Arc<str>,
continue_call: ContinueFutGetter,
follow_event_stream: FollowEventStream<T::Hash>,
follow_event_stream: FollowEventStream<HashFor<T>>,
) -> Self {
Self {
done: false,
+27 -22
View File
@@ -11,7 +11,10 @@ use crate::backend::{
Backend, BlockRef, RuntimeVersion, StorageResponse, StreamOf, StreamOfResults,
TransactionStatus,
};
use crate::{config::Header, Config, Error};
use crate::{
config::{Config, HashFor, Header},
Error,
};
use async_trait::async_trait;
use futures::TryStreamExt;
use futures::{future, future::Either, stream, Future, FutureExt, Stream, StreamExt};
@@ -97,11 +100,11 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for LegacyBackend<T> {
async fn storage_fetch_values(
&self,
keys: Vec<Vec<u8>>,
at: T::Hash,
at: HashFor<T>,
) -> Result<StreamOfResults<StorageResponse>, Error> {
fn get_entry<T: Config>(
key: Vec<u8>,
at: T::Hash,
at: HashFor<T>,
methods: LegacyRpcMethods<T>,
) -> impl Future<Output = Result<Option<StorageResponse>, Error>> {
retry(move || {
@@ -134,7 +137,7 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for LegacyBackend<T> {
async fn storage_fetch_descendant_keys(
&self,
key: Vec<u8>,
at: T::Hash,
at: HashFor<T>,
) -> Result<StreamOfResults<Vec<u8>>, Error> {
let keys = StorageFetchDescendantKeysStream {
at,
@@ -165,7 +168,7 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for LegacyBackend<T> {
async fn storage_fetch_descendant_values(
&self,
key: Vec<u8>,
at: T::Hash,
at: HashFor<T>,
) -> Result<StreamOfResults<StorageResponse>, Error> {
let keys_stream = StorageFetchDescendantKeysStream {
at,
@@ -184,7 +187,7 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for LegacyBackend<T> {
})))
}
async fn genesis_hash(&self) -> Result<T::Hash, Error> {
async fn genesis_hash(&self) -> Result<HashFor<T>, Error> {
retry(|| async {
let hash = self.methods.genesis_hash().await?;
Ok(hash)
@@ -192,7 +195,7 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for LegacyBackend<T> {
.await
}
async fn block_header(&self, at: T::Hash) -> Result<Option<T::Header>, Error> {
async fn block_header(&self, at: HashFor<T>) -> Result<Option<T::Header>, Error> {
retry(|| async {
let header = self.methods.chain_get_header(Some(at)).await?;
Ok(header)
@@ -200,7 +203,7 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for LegacyBackend<T> {
.await
}
async fn block_body(&self, at: T::Hash) -> Result<Option<Vec<Vec<u8>>>, Error> {
async fn block_body(&self, at: HashFor<T>) -> Result<Option<Vec<Vec<u8>>>, Error> {
retry(|| async {
let Some(details) = self.methods.chain_get_block(Some(at)).await? else {
return Ok(None);
@@ -212,7 +215,7 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for LegacyBackend<T> {
.await
}
async fn latest_finalized_block_ref(&self) -> Result<BlockRef<T::Hash>, Error> {
async fn latest_finalized_block_ref(&self) -> Result<BlockRef<HashFor<T>>, Error> {
retry(|| async {
let hash = self.methods.chain_get_finalized_head().await?;
Ok(BlockRef::from_hash(hash))
@@ -270,16 +273,16 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for LegacyBackend<T> {
async fn stream_all_block_headers(
&self,
) -> Result<StreamOfResults<(T::Header, BlockRef<T::Hash>)>, Error> {
hasher: T::Hasher,
) -> Result<StreamOfResults<(T::Header, BlockRef<HashFor<T>>)>, Error> {
let methods = self.methods.clone();
let retry_sub = retry_stream(move || {
let methods = methods.clone();
Box::pin(async move {
let sub = methods.chain_subscribe_all_heads().await?;
let sub = sub.map_err(|e| e.into()).map(|r| {
let sub = sub.map_err(|e| e.into()).map(move |r| {
r.map(|h| {
let hash = h.hash();
let hash = h.hash_with(hasher);
(h, BlockRef::from_hash(hash))
})
});
@@ -293,16 +296,17 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for LegacyBackend<T> {
async fn stream_best_block_headers(
&self,
) -> Result<StreamOfResults<(T::Header, BlockRef<T::Hash>)>, Error> {
hasher: T::Hasher,
) -> Result<StreamOfResults<(T::Header, BlockRef<HashFor<T>>)>, Error> {
let methods = self.methods.clone();
let retry_sub = retry_stream(move || {
let methods = methods.clone();
Box::pin(async move {
let sub = methods.chain_subscribe_new_heads().await?;
let sub = sub.map_err(|e| e.into()).map(|r| {
let sub = sub.map_err(|e| e.into()).map(move |r| {
r.map(|h| {
let hash = h.hash();
let hash = h.hash_with(hasher);
(h, BlockRef::from_hash(hash))
})
});
@@ -316,7 +320,8 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for LegacyBackend<T> {
async fn stream_finalized_block_headers(
&self,
) -> Result<StreamOfResults<(T::Header, BlockRef<T::Hash>)>, Error> {
hasher: T::Hasher,
) -> Result<StreamOfResults<(T::Header, BlockRef<HashFor<T>>)>, Error> {
let this = self.clone();
let retry_sub = retry_stream(move || {
@@ -338,9 +343,9 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for LegacyBackend<T> {
sub,
last_finalized_block_num,
);
let sub = sub.map(|r| {
let sub = sub.map(move |r| {
r.map(|h| {
let hash = h.hash();
let hash = h.hash_with(hasher);
(h, BlockRef::from_hash(hash))
})
});
@@ -356,7 +361,7 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for LegacyBackend<T> {
async fn submit_transaction(
&self,
extrinsic: &[u8],
) -> Result<StreamOfResults<TransactionStatus<T::Hash>>, Error> {
) -> Result<StreamOfResults<TransactionStatus<HashFor<T>>>, Error> {
let sub = self
.methods
.author_submit_and_watch_extrinsic(extrinsic)
@@ -417,7 +422,7 @@ impl<T: Config + Send + Sync + 'static> Backend<T> for LegacyBackend<T> {
&self,
method: &str,
call_parameters: Option<&[u8]>,
at: T::Hash,
at: HashFor<T>,
) -> Result<Vec<u8>, Error> {
retry(|| async {
let res = self
@@ -484,7 +489,7 @@ where
pub struct StorageFetchDescendantKeysStream<T: Config> {
methods: LegacyRpcMethods<T>,
key: Vec<u8>,
at: T::Hash,
at: HashFor<T>,
// How many entries to ask for each time.
storage_page_size: u32,
// What key do we start paginating from? None = from the beginning.
+24 -22
View File
@@ -10,9 +10,9 @@ pub mod chain_head;
pub mod legacy;
pub mod utils;
use crate::config::{Config, HashFor};
use crate::error::Error;
use crate::metadata::Metadata;
use crate::Config;
use async_trait::async_trait;
use codec::{Decode, Encode};
use futures::{Stream, StreamExt};
@@ -82,37 +82,37 @@ pub trait Backend<T: Config>: sealed::Sealed + Send + Sync + 'static {
async fn storage_fetch_values(
&self,
keys: Vec<Vec<u8>>,
at: T::Hash,
at: HashFor<T>,
) -> Result<StreamOfResults<StorageResponse>, Error>;
/// Fetch keys underneath the given key from storage.
async fn storage_fetch_descendant_keys(
&self,
key: Vec<u8>,
at: T::Hash,
at: HashFor<T>,
) -> Result<StreamOfResults<Vec<u8>>, Error>;
/// Fetch values underneath the given key from storage.
async fn storage_fetch_descendant_values(
&self,
key: Vec<u8>,
at: T::Hash,
at: HashFor<T>,
) -> Result<StreamOfResults<StorageResponse>, Error>;
/// Fetch the genesis hash
async fn genesis_hash(&self) -> Result<T::Hash, Error>;
async fn genesis_hash(&self) -> Result<HashFor<T>, Error>;
/// Get a block header
async fn block_header(&self, at: T::Hash) -> Result<Option<T::Header>, Error>;
async fn block_header(&self, at: HashFor<T>) -> Result<Option<T::Header>, Error>;
/// Return the extrinsics found in the block. Each extrinsic is represented
/// by a vector of bytes which has _not_ been SCALE decoded (in other words, the
/// first bytes in the vector will decode to the compact encoded length of the extrinsic)
async fn block_body(&self, at: T::Hash) -> Result<Option<Vec<Vec<u8>>>, Error>;
async fn block_body(&self, at: HashFor<T>) -> Result<Option<Vec<Vec<u8>>>, Error>;
/// Get the most recent finalized block hash.
/// Note: needed only in blocks client for finalized block stream; can prolly be removed.
async fn latest_finalized_block_ref(&self) -> Result<BlockRef<T::Hash>, Error>;
async fn latest_finalized_block_ref(&self) -> Result<BlockRef<HashFor<T>>, Error>;
/// Get information about the current runtime.
async fn current_runtime_version(&self) -> Result<RuntimeVersion, Error>;
@@ -123,30 +123,33 @@ pub trait Backend<T: Config>: sealed::Sealed + Send + Sync + 'static {
/// A stream of all new block headers as they arrive.
async fn stream_all_block_headers(
&self,
) -> Result<StreamOfResults<(T::Header, BlockRef<T::Hash>)>, Error>;
hasher: T::Hasher,
) -> Result<StreamOfResults<(T::Header, BlockRef<HashFor<T>>)>, Error>;
/// A stream of best block headers.
async fn stream_best_block_headers(
&self,
) -> Result<StreamOfResults<(T::Header, BlockRef<T::Hash>)>, Error>;
hasher: T::Hasher,
) -> Result<StreamOfResults<(T::Header, BlockRef<HashFor<T>>)>, Error>;
/// A stream of finalized block headers.
async fn stream_finalized_block_headers(
&self,
) -> Result<StreamOfResults<(T::Header, BlockRef<T::Hash>)>, Error>;
hasher: T::Hasher,
) -> Result<StreamOfResults<(T::Header, BlockRef<HashFor<T>>)>, Error>;
/// Submit a transaction. This will return a stream of events about it.
async fn submit_transaction(
&self,
bytes: &[u8],
) -> Result<StreamOfResults<TransactionStatus<T::Hash>>, Error>;
) -> Result<StreamOfResults<TransactionStatus<HashFor<T>>>, Error>;
/// Make a call to some runtime API.
async fn call(
&self,
method: &str,
call_parameters: Option<&[u8]>,
at: T::Hash,
at: HashFor<T>,
) -> Result<Vec<u8>, Error>;
}
@@ -157,7 +160,7 @@ pub trait BackendExt<T: Config>: Backend<T> {
async fn storage_fetch_value(
&self,
key: Vec<u8>,
at: T::Hash,
at: HashFor<T>,
) -> Result<Option<Vec<u8>>, Error> {
self.storage_fetch_values(vec![key], at)
.await?
@@ -173,7 +176,7 @@ pub trait BackendExt<T: Config>: Backend<T> {
&self,
method: &str,
call_parameters: Option<&[u8]>,
at: T::Hash,
at: HashFor<T>,
) -> Result<D, Error> {
let bytes = self.call(method, call_parameters, at).await?;
let res = D::decode(&mut &*bytes)?;
@@ -181,7 +184,7 @@ pub trait BackendExt<T: Config>: Backend<T> {
}
/// Return the metadata at some version.
async fn metadata_at_version(&self, version: u32, at: T::Hash) -> Result<Metadata, Error> {
async fn metadata_at_version(&self, version: u32, at: HashFor<T>) -> Result<Metadata, Error> {
let param = version.encode();
let opaque: Option<frame_metadata::OpaqueMetadata> = self
@@ -196,7 +199,7 @@ pub trait BackendExt<T: Config>: Backend<T> {
}
/// Return V14 metadata from the legacy `Metadata_metadata` call.
async fn legacy_metadata(&self, at: T::Hash) -> Result<Metadata, Error> {
async fn legacy_metadata(&self, at: HashFor<T>) -> Result<Metadata, Error> {
let opaque: frame_metadata::OpaqueMetadata =
self.call_decoding("Metadata_metadata", None, at).await?;
let metadata: Metadata = Decode::decode(&mut &opaque.0[..])?;
@@ -412,7 +415,6 @@ mod test {
// Define dummy config
enum Conf {}
impl Config for Conf {
type Hash = H256;
type AccountId = crate::utils::AccountId32;
type Address = crate::utils::MultiAddress<Self::AccountId, ()>;
type Signature = crate::utils::MultiSignature;
@@ -540,7 +542,7 @@ mod test {
/// - `call`
/// The test covers them because they follow the simple pattern of:
/// ```no_run
/// async fn THE_THING(&self) -> Result<T::Hash, Error> {
/// async fn THE_THING(&self) -> Result<HashFor<T>, Error> {
/// retry(|| <DO THE THING> ).await
/// }
/// ```
@@ -573,7 +575,7 @@ mod test {
/// ```no_run
/// async fn stream_the_thing(
/// &self,
/// ) -> Result<StreamOfResults<(T::Header, BlockRef<T::Hash>)>, Error> {
/// ) -> Result<StreamOfResults<(T::Header, BlockRef<HashFor<T>>)>, Error> {
/// let methods = self.methods.clone();
/// let retry_sub = retry_stream(move || {
/// let methods = methods.clone();
@@ -692,7 +694,7 @@ mod test {
serde_json::from_value(spec).expect("Mock runtime spec should be the right shape")
}
type FollowEvent = chain_head::FollowEvent<<Conf as Config>::Hash>;
type FollowEvent = chain_head::FollowEvent<HashFor<Conf>>;
/// Build a mock client which can handle `chainHead_v1_follow` subscriptions.
/// Messages from the provided receiver are sent to the latest active subscription.
@@ -747,7 +749,7 @@ mod test {
async move {
if let Some(id) = id {
let follow_event =
FollowEvent::Initialized(Initialized::<<Conf as Config>::Hash> {
FollowEvent::Initialized(Initialized::<HashFor<Conf>> {
finalized_block_hashes: vec![random_hash()],
finalized_block_runtime: Some(chain_head::RuntimeEvent::Valid(
RuntimeVersionEvent {
+9 -9
View File
@@ -6,7 +6,7 @@ use crate::{
backend::BlockRef,
blocks::Extrinsics,
client::{OfflineClientT, OnlineClientT},
config::{Config, Header},
config::{Config, HashFor, Header},
error::{BlockError, DecodeError, Error},
events,
runtime_api::RuntimeApi,
@@ -20,7 +20,7 @@ use std::sync::Arc;
/// A representation of a block.
pub struct Block<T: Config, C> {
header: T::Header,
block_ref: BlockRef<T::Hash>,
block_ref: BlockRef<HashFor<T>>,
client: C,
// Since we obtain the same events for every extrinsic, let's
// cache them so that we only ever do that once:
@@ -36,7 +36,7 @@ where
T: Config,
C: OfflineClientT<T>,
{
pub(crate) fn new(header: T::Header, block_ref: BlockRef<T::Hash>, client: C) -> Self {
pub(crate) fn new(header: T::Header, block_ref: BlockRef<HashFor<T>>, client: C) -> Self {
Block {
header,
block_ref,
@@ -47,12 +47,12 @@ where
/// Return a reference to the given block. While this reference is kept alive,
/// the backend will (if possible) endeavour to keep hold of the block.
pub fn reference(&self) -> BlockRef<T::Hash> {
pub fn reference(&self) -> BlockRef<HashFor<T>> {
self.block_ref.clone()
}
/// Return the block hash.
pub fn hash(&self) -> T::Hash {
pub fn hash(&self) -> HashFor<T> {
self.block_ref.hash()
}
@@ -74,12 +74,12 @@ where
{
/// Return the events associated with the block, fetching them from the node if necessary.
pub async fn events(&self) -> Result<events::Events<T>, Error> {
get_events(&self.client, self.header.hash(), &self.cached_events).await
get_events(&self.client, self.hash(), &self.cached_events).await
}
/// Fetch and return the extrinsics in the block body.
pub async fn extrinsics(&self) -> Result<Extrinsics<T, C>, Error> {
let block_hash = self.header.hash();
let block_hash = self.hash();
let Some(extrinsics) = self.client.backend().block_body(block_hash).await? else {
return Err(BlockError::not_found(block_hash).into());
};
@@ -111,7 +111,7 @@ where
// Return Events from the cache, or fetch from the node if needed.
pub(crate) async fn get_events<C, T>(
client: &C,
block_hash: T::Hash,
block_hash: HashFor<T>,
cached_events: &AsyncMutex<Option<events::Events<T>>>,
) -> Result<events::Events<T>, Error>
where
@@ -140,7 +140,7 @@ where
pub(crate) async fn get_account_nonce<C, T>(
client: &C,
account_id: &T::AccountId,
block_hash: T::Hash,
block_hash: HashFor<T>,
) -> Result<u64, Error>
where
C: OnlineClientT<T>,
+15 -7
View File
@@ -6,7 +6,7 @@ use super::Block;
use crate::{
backend::{BlockRef, StreamOfResults},
client::OnlineClientT,
config::Config,
config::{Config, HashFor},
error::{BlockError, Error},
utils::PhantomDataSendSync,
};
@@ -48,7 +48,7 @@ where
/// but may run into errors attempting to work with them.
pub fn at(
&self,
block_ref: impl Into<BlockRef<T::Hash>>,
block_ref: impl Into<BlockRef<HashFor<T>>>,
) -> impl Future<Output = Result<Block<T, Client>, Error>> + Send + 'static {
self.at_or_latest(Some(block_ref.into()))
}
@@ -64,7 +64,7 @@ where
/// provided.
fn at_or_latest(
&self,
block_ref: Option<BlockRef<T::Hash>>,
block_ref: Option<BlockRef<HashFor<T>>>,
) -> impl Future<Output = Result<Block<T, Client>, Error>> + Send + 'static {
let client = self.client.clone();
async move {
@@ -94,8 +94,9 @@ where
Client: Send + Sync + 'static,
{
let client = self.client.clone();
let hasher = client.hasher();
header_sub_fut_to_block_sub(self.clone(), async move {
let stream = client.backend().stream_all_block_headers().await?;
let stream = client.backend().stream_all_block_headers(hasher).await?;
BlockStreamRes::Ok(stream)
})
}
@@ -111,8 +112,9 @@ where
Client: Send + Sync + 'static,
{
let client = self.client.clone();
let hasher = client.hasher();
header_sub_fut_to_block_sub(self.clone(), async move {
let stream = client.backend().stream_best_block_headers().await?;
let stream = client.backend().stream_best_block_headers(hasher).await?;
BlockStreamRes::Ok(stream)
})
}
@@ -125,8 +127,12 @@ where
Client: Send + Sync + 'static,
{
let client = self.client.clone();
let hasher = client.hasher();
header_sub_fut_to_block_sub(self.clone(), async move {
let stream = client.backend().stream_finalized_block_headers().await?;
let stream = client
.backend()
.stream_finalized_block_headers(hasher)
.await?;
BlockStreamRes::Ok(stream)
})
}
@@ -140,7 +146,9 @@ async fn header_sub_fut_to_block_sub<T, Client, S>(
) -> Result<BlockStream<Block<T, Client>>, Error>
where
T: Config,
S: Future<Output = Result<BlockStream<(T::Header, BlockRef<T::Hash>)>, Error>> + Send + 'static,
S: Future<Output = Result<BlockStream<(T::Header, BlockRef<HashFor<T>>)>, Error>>
+ Send
+ 'static,
Client: OnlineClientT<T> + Send + Sync + 'static,
{
let sub = sub.await?.then(move |header_and_ref| {
+10 -10
View File
@@ -5,7 +5,7 @@
use crate::{
blocks::block_types::{get_events, CachedEvents},
client::{OfflineClientT, OnlineClientT},
config::Config,
config::{Config, HashFor},
error::Error,
events,
};
@@ -25,7 +25,7 @@ pub struct Extrinsics<T: Config, C> {
inner: CoreExtrinsics<T>,
client: C,
cached_events: CachedEvents<T>,
hash: T::Hash,
hash: HashFor<T>,
}
impl<T, C> Extrinsics<T, C>
@@ -37,7 +37,7 @@ where
client: C,
extrinsics: Vec<Vec<u8>>,
cached_events: CachedEvents<T>,
hash: T::Hash,
hash: HashFor<T>,
) -> Result<Self, Error> {
let inner = CoreExtrinsics::decode_from(extrinsics, client.metadata())?;
Ok(Self {
@@ -59,7 +59,7 @@ where
}
/// Return the block hash that these extrinsics are from.
pub fn block_hash(&self) -> T::Hash {
pub fn block_hash(&self) -> HashFor<T> {
self.hash
}
@@ -125,7 +125,7 @@ where
pub struct ExtrinsicDetails<T: Config, C> {
inner: CoreExtrinsicDetails<T>,
/// The block hash of this extrinsic (needed to fetch events).
block_hash: T::Hash,
block_hash: HashFor<T>,
/// Subxt client.
client: C,
/// Cached events.
@@ -141,7 +141,7 @@ where
pub(crate) fn new(
inner: CoreExtrinsicDetails<T>,
client: C,
block_hash: T::Hash,
block_hash: HashFor<T>,
cached_events: CachedEvents<T>,
) -> ExtrinsicDetails<T, C> {
ExtrinsicDetails {
@@ -153,7 +153,7 @@ where
}
/// See [`subxt_core::blocks::ExtrinsicDetails::hash()`].
pub fn hash(&self) -> T::Hash {
pub fn hash(&self) -> HashFor<T> {
self.inner.hash()
}
@@ -271,7 +271,7 @@ pub struct ExtrinsicEvents<T: Config> {
// this type is returned from TxProgress things in the most
// basic flows, so it's the only place people can access it
// without complicating things for themselves).
ext_hash: T::Hash,
ext_hash: HashFor<T>,
// The index of the extrinsic:
idx: u32,
// All of the events in the block:
@@ -281,7 +281,7 @@ pub struct ExtrinsicEvents<T: Config> {
impl<T: Config> ExtrinsicEvents<T> {
/// Creates a new instance of `ExtrinsicEvents`.
#[doc(hidden)]
pub fn new(ext_hash: T::Hash, idx: u32, events: events::Events<T>) -> Self {
pub fn new(ext_hash: HashFor<T>, idx: u32, events: events::Events<T>) -> Self {
Self {
ext_hash,
idx,
@@ -295,7 +295,7 @@ impl<T: Config> ExtrinsicEvents<T> {
}
/// Return the hash of the extrinsic.
pub fn extrinsic_hash(&self) -> T::Hash {
pub fn extrinsic_hash(&self) -> HashFor<T> {
self.ext_hash
}
+26 -6
View File
@@ -4,8 +4,14 @@
use crate::custom_values::CustomValuesClient;
use crate::{
blocks::BlocksClient, constants::ConstantsClient, events::EventsClient,
runtime_api::RuntimeApiClient, storage::StorageClient, tx::TxClient, Config, Metadata,
blocks::BlocksClient,
config::{Config, HashFor},
constants::ConstantsClient,
events::EventsClient,
runtime_api::RuntimeApiClient,
storage::StorageClient,
tx::TxClient,
Metadata,
};
use derive_where::derive_where;
@@ -19,11 +25,14 @@ pub trait OfflineClientT<T: Config>: Clone + Send + Sync + 'static {
fn metadata(&self) -> Metadata;
/// Return the provided genesis hash.
fn genesis_hash(&self) -> T::Hash;
fn genesis_hash(&self) -> HashFor<T>;
/// Return the provided [`RuntimeVersion`].
fn runtime_version(&self) -> RuntimeVersion;
/// Return the hasher used on the chain.
fn hasher(&self) -> T::Hasher;
/// Return the [subxt_core::client::ClientState] (metadata, runtime version and genesis hash).
fn client_state(&self) -> ClientState<T> {
ClientState {
@@ -74,19 +83,22 @@ pub trait OfflineClientT<T: Config>: Clone + Send + Sync + 'static {
#[derive_where(Debug, Clone)]
pub struct OfflineClient<T: Config> {
inner: Arc<ClientState<T>>,
hasher: T::Hasher,
}
impl<T: Config> OfflineClient<T> {
/// Construct a new [`OfflineClient`], providing
/// the necessary runtime and compile-time arguments.
pub fn new(
genesis_hash: T::Hash,
genesis_hash: HashFor<T>,
runtime_version: RuntimeVersion,
metadata: impl Into<Metadata>,
) -> OfflineClient<T> {
let metadata = metadata.into();
let hasher = <T::Hasher as subxt_core::config::Hasher>::new(&metadata);
OfflineClient {
hasher,
inner: Arc::new(ClientState {
genesis_hash,
runtime_version,
@@ -96,7 +108,7 @@ impl<T: Config> OfflineClient<T> {
}
/// Return the genesis hash.
pub fn genesis_hash(&self) -> T::Hash {
pub fn genesis_hash(&self) -> HashFor<T> {
self.inner.genesis_hash
}
@@ -110,6 +122,11 @@ impl<T: Config> OfflineClient<T> {
self.inner.metadata.clone()
}
/// Return the hasher used for the chain.
pub fn hasher(&self) -> T::Hasher {
self.hasher
}
// Just a copy of the most important trait methods so that people
// don't need to import the trait for most things:
@@ -140,7 +157,7 @@ impl<T: Config> OfflineClient<T> {
}
impl<T: Config> OfflineClientT<T> for OfflineClient<T> {
fn genesis_hash(&self) -> T::Hash {
fn genesis_hash(&self) -> HashFor<T> {
self.genesis_hash()
}
fn runtime_version(&self) -> RuntimeVersion {
@@ -149,6 +166,9 @@ impl<T: Config> OfflineClientT<T> for OfflineClient<T> {
fn metadata(&self) -> Metadata {
self.metadata()
}
fn hasher(&self) -> T::Hasher {
self.hasher()
}
}
// For ergonomics; cloning a client is deliberately fairly cheap (via Arc),
+38 -12
View File
@@ -7,13 +7,14 @@ use crate::custom_values::CustomValuesClient;
use crate::{
backend::{legacy::LegacyBackend, rpc::RpcClient, Backend, BackendExt, StreamOfResults},
blocks::{BlockRef, BlocksClient},
config::{Config, HashFor},
constants::ConstantsClient,
error::Error,
events::EventsClient,
runtime_api::RuntimeApiClient,
storage::StorageClient,
tx::TxClient,
Config, Metadata,
Metadata,
};
use derive_where::derive_where;
use futures::future;
@@ -37,9 +38,10 @@ pub struct OnlineClient<T: Config> {
#[derive_where(Debug)]
struct Inner<T: Config> {
genesis_hash: T::Hash,
genesis_hash: HashFor<T>,
runtime_version: RuntimeVersion,
metadata: Metadata,
hasher: T::Hasher,
}
impl<T: Config> std::fmt::Debug for OnlineClient<T> {
@@ -103,7 +105,7 @@ impl<T: Config> OnlineClient<T> {
/// If you're unsure what you're doing, prefer one of the alternate methods to instantiate
/// a client.
pub fn from_rpc_client_with(
genesis_hash: T::Hash,
genesis_hash: HashFor<T>,
runtime_version: RuntimeVersion,
metadata: impl Into<Metadata>,
rpc_client: impl Into<RpcClient>,
@@ -141,16 +143,22 @@ impl<T: Config> OnlineClient<T> {
/// If you're unsure what you're doing, prefer one of the alternate methods to instantiate
/// a client.
pub fn from_backend_with<B: Backend<T>>(
genesis_hash: T::Hash,
genesis_hash: HashFor<T>,
runtime_version: RuntimeVersion,
metadata: impl Into<Metadata>,
backend: Arc<B>,
) -> Result<OnlineClient<T>, Error> {
use subxt_core::config::Hasher;
let metadata = metadata.into();
let hasher = T::Hasher::new(&metadata);
Ok(OnlineClient {
inner: Arc::new(RwLock::new(Inner {
genesis_hash,
runtime_version,
metadata: metadata.into(),
metadata,
hasher,
})),
backend,
})
@@ -159,7 +167,7 @@ impl<T: Config> OnlineClient<T> {
/// Fetch the metadata from substrate using the runtime API.
async fn fetch_metadata(
backend: &dyn Backend<T>,
block_hash: T::Hash,
block_hash: HashFor<T>,
) -> Result<Metadata, Error> {
#[cfg(feature = "unstable-metadata")]
{
@@ -184,7 +192,7 @@ impl<T: Config> OnlineClient<T> {
/// Fetch the latest stable metadata from the node.
async fn fetch_latest_stable_metadata(
backend: &dyn Backend<T>,
block_hash: T::Hash,
block_hash: HashFor<T>,
) -> Result<Metadata, Error> {
// This is the latest stable metadata that subxt can utilize.
const V15_METADATA_VERSION: u32 = 15;
@@ -246,6 +254,11 @@ impl<T: Config> OnlineClient<T> {
ClientRuntimeUpdater(self.clone())
}
/// Return the hasher configured for hashing blocks and extrinsics.
pub fn hasher(&self) -> T::Hasher {
self.inner.read().expect("shouldn't be poisoned").hasher
}
/// Return the [`Metadata`] used in this client.
pub fn metadata(&self) -> Metadata {
let inner = self.inner.read().expect("shouldn't be poisoned");
@@ -264,7 +277,7 @@ impl<T: Config> OnlineClient<T> {
}
/// Return the genesis hash.
pub fn genesis_hash(&self) -> T::Hash {
pub fn genesis_hash(&self) -> HashFor<T> {
let inner = self.inner.read().expect("shouldn't be poisoned");
inner.genesis_hash
}
@@ -275,7 +288,7 @@ impl<T: Config> OnlineClient<T> {
///
/// Setting a custom genesis hash may leave Subxt unable to
/// submit valid transactions.
pub fn set_genesis_hash(&self, genesis_hash: T::Hash) {
pub fn set_genesis_hash(&self, genesis_hash: HashFor<T>) {
let mut inner = self.inner.write().expect("shouldn't be poisoned");
inner.genesis_hash = genesis_hash;
}
@@ -355,12 +368,15 @@ impl<T: Config> OfflineClientT<T> for OnlineClient<T> {
fn metadata(&self) -> Metadata {
self.metadata()
}
fn genesis_hash(&self) -> T::Hash {
fn genesis_hash(&self) -> HashFor<T> {
self.genesis_hash()
}
fn runtime_version(&self) -> RuntimeVersion {
self.runtime_version()
}
fn hasher(&self) -> T::Hasher {
self.hasher()
}
// This is provided by default, but we can optimise here and only lock once:
fn client_state(&self) -> ClientState<T> {
let inner = self.inner.read().expect("shouldn't be poisoned");
@@ -501,10 +517,20 @@ impl Update {
async fn wait_runtime_upgrade_in_finalized_block<T: Config>(
client: &OnlineClient<T>,
runtime_version: &RuntimeVersion,
) -> Option<Result<BlockRef<T::Hash>, Error>> {
) -> Option<Result<BlockRef<HashFor<T>>, Error>> {
use scale_value::At;
let mut block_sub = match client.backend().stream_finalized_block_headers().await {
let hasher = client
.inner
.read()
.expect("Lock shouldn't be poisoned")
.hasher;
let mut block_sub = match client
.backend()
.stream_finalized_block_headers(hasher)
.await
{
Ok(s) => s,
Err(err) => return Some(Err(err)),
};
+9 -4
View File
@@ -3,7 +3,12 @@
// see LICENSE for license details.
use crate::backend::{Backend, BackendExt, BlockRef};
use crate::{client::OnlineClientT, error::Error, events::Events, Config};
use crate::{
client::OnlineClientT,
config::{Config, HashFor},
error::Error,
events::Events,
};
use derive_where::derive_where;
use std::future::Future;
@@ -38,7 +43,7 @@ where
/// but may run into errors attempting to work with them.
pub fn at(
&self,
block_ref: impl Into<BlockRef<T::Hash>>,
block_ref: impl Into<BlockRef<HashFor<T>>>,
) -> impl Future<Output = Result<Events<T>, Error>> + Send + 'static {
self.at_or_latest(Some(block_ref.into()))
}
@@ -51,7 +56,7 @@ where
/// Obtain events at some block hash.
fn at_or_latest(
&self,
block_ref: Option<BlockRef<T::Hash>>,
block_ref: Option<BlockRef<HashFor<T>>>,
) -> impl Future<Output = Result<Events<T>, Error>> + Send + 'static {
// Clone and pass the client in like this so that we can explicitly
// return a Future that's Send + 'static, rather than tied to &self.
@@ -82,7 +87,7 @@ fn system_events_key() -> [u8; 32] {
// Get the event bytes from the provided client, at the provided block hash.
pub(crate) async fn get_event_bytes<T: Config>(
backend: &dyn Backend<T>,
block_hash: T::Hash,
block_hash: HashFor<T>,
) -> Result<Vec<u8>, Error> {
Ok(backend
.storage_fetch_value(system_events_key().to_vec(), block_hash)
+5 -2
View File
@@ -1,4 +1,7 @@
use crate::{Config, Error, Metadata};
use crate::{
config::{Config, HashFor},
Error, Metadata,
};
use derive_where::derive_where;
use scale_decode::DecodeAsType;
use subxt_core::events::{EventDetails as CoreEventDetails, Events as CoreEvents};
@@ -153,7 +156,7 @@ impl<T: Config> EventDetails<T> {
}
/// Return the topics associated with this event.
pub fn topics(&self) -> &[T::Hash] {
pub fn topics(&self) -> &[HashFor<T>] {
self.inner.topics()
}
}
+5 -2
View File
@@ -11,7 +11,10 @@ mod events_type;
use crate::client::OnlineClientT;
use crate::Error;
use subxt_core::{Config, Metadata};
use subxt_core::{
config::{Config, HashFor},
Metadata,
};
pub use events_client::EventsClient;
pub use events_type::{EventDetails, EventMetadataDetails, Events, Phase, StaticEvent};
@@ -19,7 +22,7 @@ pub use events_type::{EventDetails, EventMetadataDetails, Events, Phase, StaticE
/// Creates a new [`Events`] instance by fetching the corresponding bytes at `block_hash` from the client.
pub async fn new_events_from_client<T, C>(
metadata: Metadata,
block_hash: T::Hash,
block_hash: HashFor<T>,
client: C,
) -> Result<Events<T>, Error>
where
+4 -4
View File
@@ -57,10 +57,10 @@ pub mod utils;
/// Polkadot node.
pub mod config {
pub use subxt_core::config::{
polkadot, substrate, transaction_extensions, BlockHash, Config, DefaultExtrinsicParams,
DefaultExtrinsicParamsBuilder, ExtrinsicParams, ExtrinsicParamsEncoder, Hasher, Header,
PolkadotConfig, PolkadotExtrinsicParams, SubstrateConfig, SubstrateExtrinsicParams,
TransactionExtension,
polkadot, substrate, transaction_extensions, Config, DefaultExtrinsicParams,
DefaultExtrinsicParamsBuilder, ExtrinsicParams, ExtrinsicParamsEncoder, Hash, HashFor,
Hasher, Header, PolkadotConfig, PolkadotExtrinsicParams, SubstrateConfig,
SubstrateExtrinsicParams, TransactionExtension,
};
pub use subxt_core::error::ExtrinsicParamsError;
}
+7 -2
View File
@@ -4,7 +4,12 @@
use super::runtime_types::RuntimeApi;
use crate::{backend::BlockRef, client::OnlineClientT, error::Error, Config};
use crate::{
backend::BlockRef,
client::OnlineClientT,
config::{Config, HashFor},
error::Error,
};
use derive_where::derive_where;
use std::{future::Future, marker::PhantomData};
@@ -31,7 +36,7 @@ where
Client: OnlineClientT<T>,
{
/// Obtain a runtime API interface at some block hash.
pub fn at(&self, block_ref: impl Into<BlockRef<T::Hash>>) -> RuntimeApi<T, Client> {
pub fn at(&self, block_ref: impl Into<BlockRef<HashFor<T>>>) -> RuntimeApi<T, Client> {
RuntimeApi::new(self.client.clone(), block_ref.into())
}
+3 -3
View File
@@ -6,8 +6,8 @@ use super::Payload;
use crate::{
backend::{BackendExt, BlockRef},
client::OnlineClientT,
config::{Config, HashFor},
error::Error,
Config,
};
use codec::Decode;
use derive_where::derive_where;
@@ -17,13 +17,13 @@ use std::{future::Future, marker::PhantomData};
#[derive_where(Clone; Client)]
pub struct RuntimeApi<T: Config, Client> {
client: Client,
block_ref: BlockRef<T::Hash>,
block_ref: BlockRef<HashFor<T>>,
_marker: PhantomData<T>,
}
impl<T: Config, Client> RuntimeApi<T, Client> {
/// Create a new [`RuntimeApi`]
pub(crate) fn new(client: Client, block_ref: BlockRef<T::Hash>) -> Self {
pub(crate) fn new(client: Client, block_ref: BlockRef<HashFor<T>>) -> Self {
Self {
client,
block_ref,
+2 -2
View File
@@ -6,8 +6,8 @@ use super::storage_type::Storage;
use crate::{
backend::BlockRef,
client::{OfflineClientT, OnlineClientT},
config::{Config, HashFor},
error::Error,
Config,
};
use derive_where::derive_where;
use std::{future::Future, marker::PhantomData};
@@ -65,7 +65,7 @@ where
Client: OnlineClientT<T>,
{
/// Obtain storage at some block hash.
pub fn at(&self, block_ref: impl Into<BlockRef<T::Hash>>) -> Storage<T, Client> {
pub fn at(&self, block_ref: impl Into<BlockRef<HashFor<T>>>) -> Storage<T, Client> {
Storage::new(self.client.clone(), block_ref.into())
}
+3 -3
View File
@@ -5,9 +5,9 @@
use crate::{
backend::{BackendExt, BlockRef},
client::OnlineClientT,
config::{Config, HashFor},
error::{Error, MetadataError, StorageAddressError},
metadata::DecodeWithMetadata,
Config,
};
use codec::Decode;
use derive_where::derive_where;
@@ -23,13 +23,13 @@ pub use crate::backend::StreamOfResults;
#[derive_where(Clone; Client)]
pub struct Storage<T: Config, Client> {
client: Client,
block_ref: BlockRef<T::Hash>,
block_ref: BlockRef<HashFor<T>>,
_marker: PhantomData<T>,
}
impl<T: Config, Client> Storage<T, Client> {
/// Create a new [`Storage`]
pub(crate) fn new(client: Client, block_ref: BlockRef<T::Hash>) -> Self {
pub(crate) fn new(client: Client, block_ref: BlockRef<HashFor<T>>) -> Self {
Self {
client,
block_ref,
+8 -8
View File
@@ -5,7 +5,7 @@
use crate::{
backend::{BackendExt, BlockRef, TransactionStatus},
client::{OfflineClientT, OnlineClientT},
config::{Config, ExtrinsicParams, Header},
config::{Config, ExtrinsicParams, HashFor, Header},
error::{BlockError, Error},
tx::{Payload, Signer as SignerT, TxProgress},
utils::PhantomDataSendSync,
@@ -321,7 +321,7 @@ where
&mut self,
call: &Call,
signer: &Signer,
) -> Result<T::Hash, Error>
) -> Result<HashFor<T>, Error>
where
Call: Payload,
Signer: SignerT<T>,
@@ -344,7 +344,7 @@ where
call: &Call,
signer: &Signer,
params: <T::ExtrinsicParams as ExtrinsicParams<T>>::Params,
) -> Result<T::Hash, Error>
) -> Result<HashFor<T>, Error>
where
Call: Payload,
Signer: SignerT<T>,
@@ -459,8 +459,8 @@ where
}
/// Calculate and return the hash of the transaction, based on the configured hasher.
pub fn hash(&self) -> T::Hash {
self.inner.hash()
pub fn hash(&self) -> HashFor<T> {
self.inner.hash_with(self.client.hasher())
}
/// Returns the SCALE encoded transaction bytes.
@@ -503,7 +503,7 @@ where
/// It's usually better to call `submit_and_watch` to get an idea of the progress of the
/// submission and whether it's eventually successful or not. This call does not guarantee
/// success, and is just sending the transaction to the chain.
pub async fn submit(&self) -> Result<T::Hash, Error> {
pub async fn submit(&self) -> Result<HashFor<T>, Error> {
let ext_hash = self.hash();
let mut sub = self
.client
@@ -551,7 +551,7 @@ where
/// Returns `Ok` with a [`ValidationResult`], which is the result of attempting to dry run the transaction.
pub async fn validate_at(
&self,
at: impl Into<BlockRef<T::Hash>>,
at: impl Into<BlockRef<HashFor<T>>>,
) -> Result<ValidationResult, Error> {
let block_hash = at.into().hash();
@@ -614,7 +614,7 @@ async fn inject_account_nonce_and_block<T: Config, Client: OnlineClientT<T>>(
crate::blocks::get_account_nonce(client, account_id, block_ref.hash()).await?;
params.inject_account_nonce(account_nonce);
params.inject_block(block_header.number().into(), block_header.hash());
params.inject_block(block_header.number().into(), block_ref.hash());
Ok(())
}
+21 -14
View File
@@ -9,18 +9,18 @@ use std::task::Poll;
use crate::{
backend::{BlockRef, StreamOfResults, TransactionStatus as BackendTxStatus},
client::OnlineClientT,
config::{Config, HashFor},
error::{DispatchError, Error, RpcError, TransactionError},
events::EventsClient,
utils::strip_compact_prefix,
Config,
};
use derive_where::derive_where;
use futures::{Stream, StreamExt};
/// This struct represents a subscription to the progress of some transaction.
pub struct TxProgress<T: Config, C> {
sub: Option<StreamOfResults<BackendTxStatus<T::Hash>>>,
ext_hash: T::Hash,
sub: Option<StreamOfResults<BackendTxStatus<HashFor<T>>>>,
ext_hash: HashFor<T>,
client: C,
}
@@ -42,9 +42,9 @@ impl<T: Config, C> Unpin for TxProgress<T, C> {}
impl<T: Config, C> TxProgress<T, C> {
/// Instantiate a new [`TxProgress`] from a custom subscription.
pub fn new(
sub: StreamOfResults<BackendTxStatus<T::Hash>>,
sub: StreamOfResults<BackendTxStatus<HashFor<T>>>,
client: C,
ext_hash: T::Hash,
ext_hash: HashFor<T>,
) -> Self {
Self {
sub: Some(sub),
@@ -54,7 +54,7 @@ impl<T: Config, C> TxProgress<T, C> {
}
/// Return the hash of the extrinsic.
pub fn extrinsic_hash(&self) -> T::Hash {
pub fn extrinsic_hash(&self) -> HashFor<T> {
self.ext_hash
}
}
@@ -219,13 +219,13 @@ impl<T: Config, C> TxStatus<T, C> {
/// This struct represents a transaction that has made it into a block.
#[derive_where(Debug; C)]
pub struct TxInBlock<T: Config, C> {
block_ref: BlockRef<T::Hash>,
ext_hash: T::Hash,
block_ref: BlockRef<HashFor<T>>,
ext_hash: HashFor<T>,
client: C,
}
impl<T: Config, C> TxInBlock<T, C> {
pub(crate) fn new(block_ref: BlockRef<T::Hash>, ext_hash: T::Hash, client: C) -> Self {
pub(crate) fn new(block_ref: BlockRef<HashFor<T>>, ext_hash: HashFor<T>, client: C) -> Self {
Self {
block_ref,
ext_hash,
@@ -234,12 +234,12 @@ impl<T: Config, C> TxInBlock<T, C> {
}
/// Return the hash of the block that the transaction has made it into.
pub fn block_hash(&self) -> T::Hash {
pub fn block_hash(&self) -> HashFor<T> {
self.block_ref.hash()
}
/// Return the hash of the extrinsic that was submitted.
pub fn extrinsic_hash(&self) -> T::Hash {
pub fn extrinsic_hash(&self) -> HashFor<T> {
self.ext_hash
}
}
@@ -281,6 +281,8 @@ impl<T: Config, C: OnlineClientT<T>> TxInBlock<T, C> {
/// **Note:** This has to download block details from the node and decode events
/// from them.
pub async fn fetch_events(&self) -> Result<crate::blocks::ExtrinsicEvents<T>, Error> {
let hasher = self.client.hasher();
let block_body = self
.client
.backend()
@@ -295,7 +297,7 @@ impl<T: Config, C: OnlineClientT<T>> TxInBlock<T, C> {
let Ok((_, stripped)) = strip_compact_prefix(ext) else {
return false;
};
let hash = T::Hasher::hash_of(&stripped);
let hash = hasher.hash_of(&stripped);
hash == self.ext_hash
})
// If we successfully obtain the block hash we think contains our
@@ -321,12 +323,13 @@ mod test {
use crate::{
backend::{StreamOfResults, TransactionStatus},
client::{OfflineClientT, OnlineClientT},
config::{Config, HashFor},
tx::TxProgress,
Config, Error, SubstrateConfig,
Error, SubstrateConfig,
};
type MockTxProgress = TxProgress<SubstrateConfig, MockClient>;
type MockHash = <SubstrateConfig as Config>::Hash;
type MockHash = HashFor<SubstrateConfig>;
type MockSubstrateTxStatus = TransactionStatus<MockHash>;
/// a mock client to satisfy trait bounds in tests
@@ -346,6 +349,10 @@ mod test {
unimplemented!("just a mock impl to satisfy trait bounds")
}
fn hasher(&self) -> <SubstrateConfig as Config>::Hasher {
unimplemented!("just a mock impl to satisfy trait bounds")
}
fn client_state(&self) -> subxt_core::client::ClientState<SubstrateConfig> {
unimplemented!("just a mock impl to satisfy trait bounds")
}
@@ -16,7 +16,6 @@ use subxt::{
client::OnlineClient,
config::{Config, Hasher},
utils::AccountId32,
SubstrateConfig,
};
use subxt_rpcs::methods::chain_head::{
ArchiveStorageEventItem, Bytes, StorageQuery, StorageQueryType,
@@ -178,6 +177,7 @@ async fn archive_v1_storage() {
let ctx = test_context().await;
let rpc = ctx.chainhead_rpc_methods().await;
let api = ctx.client();
let hasher = api.hasher();
let mut blocks = fetch_finalized_blocks(&ctx, 3).await;
while let Some(block) = blocks.next().await {
@@ -236,9 +236,7 @@ async fn archive_v1_storage() {
ArchiveStorageEventItem {
key: Bytes(account_info_addr),
value: None,
hash: Some(<SubstrateConfig as Config>::Hasher::hash(
&subxt_account_info
)),
hash: Some(hasher.hash(&subxt_account_info)),
closest_descendant_merkle_value: None,
child_trie_key: None
}
@@ -15,7 +15,6 @@ use futures::Stream;
use subxt::{
config::Hasher,
utils::{AccountId32, MultiAddress},
SubstrateConfig,
};
use subxt_rpcs::methods::chain_head::{
FollowEvent, Initialized, MethodResponse, RuntimeEvent, RuntimeVersionEvent, StorageQuery,
@@ -329,6 +328,7 @@ async fn transaction_v1_broadcast() {
let ctx = test_context().await;
let api = ctx.client();
let hasher = api.hasher();
let rpc = ctx.chainhead_rpc_methods().await;
let tx_payload = node_runtime::tx()
@@ -374,7 +374,7 @@ async fn transaction_v1_broadcast() {
let Some(ext) = block_extrinsics
.iter()
.find(|ext| <SubstrateConfig as subxt::Config>::Hasher::hash(ext.bytes()) == tx_hash)
.find(|ext| hasher.hash(ext.bytes()) == tx_hash)
else {
continue;
};
@@ -12,7 +12,12 @@ use crate::{
subxt_test, test_context, TestClient, TestConfig, TestContext,
};
use subxt::ext::futures::StreamExt;
use subxt::{tx::TxProgress, utils::MultiAddress, Config, Error};
use subxt::{
config::{Config, HashFor},
tx::TxProgress,
utils::MultiAddress,
Error,
};
use subxt_signer::sr25519::{self, dev};
struct ContractsTestContext {
@@ -20,7 +25,7 @@ struct ContractsTestContext {
signer: sr25519::Keypair,
}
type Hash = <TestConfig as Config>::Hash;
type Hash = HashFor<TestConfig>;
type AccountId = <TestConfig as Config>::AccountId;
/// A dummy contract which does nothing at all.
+7 -3
View File
@@ -31,8 +31,9 @@ async fn wasm_ws_transport_works() {
let client = subxt::client::OnlineClient::<SubstrateConfig>::from_url("ws://127.0.0.1:9944")
.await
.unwrap();
let hasher = client.hasher();
let mut stream = client.backend().stream_best_block_headers().await.unwrap();
let mut stream = client.backend().stream_best_block_headers(hasher).await.unwrap();
assert!(stream.next().await.is_some());
}
@@ -41,8 +42,9 @@ async fn wasm_ws_chainhead_works() {
let rpc = subxt::backend::rpc::RpcClient::from_url("ws://127.0.0.1:9944").await.unwrap();
let backend = subxt::backend::chain_head::ChainHeadBackendBuilder::new().build_with_background_driver(rpc);
let client = subxt::client::OnlineClient::<SubstrateConfig>::from_backend(std::sync::Arc::new(backend)).await.unwrap();
let hasher = client.hasher();
let mut stream = client.backend().stream_best_block_headers().await.unwrap();
let mut stream = client.backend().stream_best_block_headers(hasher).await.unwrap();
assert!(stream.next().await.is_some());
}
@@ -50,7 +52,9 @@ async fn wasm_ws_chainhead_works() {
async fn reconnecting_rpc_client_ws_transport_works() {
let rpc = ReconnectingRpcClient::builder().build("ws://127.0.0.1:9944".to_string()).await.unwrap();
let client = subxt::client::OnlineClient::<SubstrateConfig>::from_rpc_client(rpc.clone()).await.unwrap();
let mut stream = client.backend().stream_best_block_headers().await.unwrap();
let hasher = client.hasher();
let mut stream = client.backend().stream_best_block_headers(hasher).await.unwrap();
assert!(stream.next().await.is_some());
}
+28 -2
View File
@@ -75,10 +75,36 @@ impl StripMetadata for v16::RuntimeMetadataV16 {
PalletFilter: Fn(&str) -> bool,
RuntimeApiFilter: Fn(&str) -> bool,
{
// Throw away pallets and runtime APIs we don't care about:
self.pallets.retain(|pallet| keep_pallet(&pallet.name));
// Throw away pallets and runtime APIs we don't care about.
// Keep the System pallet, because it has some associated types that we care about in Subxt.
self.pallets
.retain(|pallet| pallet.name == "System" || keep_pallet(&pallet.name));
self.apis.retain(|api| keep_runtime_api(&api.name));
// If the user asked to strip the System pallet, we'll strip most things from it but keep the
// associated types, because Subxt makes use of them.
if !keep_pallet("System") {
if let Some(system_pallet) = self.pallets.iter_mut().find(|p| p.name == "System") {
let index = system_pallet.index;
let associated_types = core::mem::take(&mut system_pallet.associated_types);
*system_pallet = v16::PalletMetadata {
name: "System".to_string(),
index,
associated_types,
// Everything else is empty:
storage: None,
calls: None,
event: None,
constants: vec![],
error: None,
view_functions: vec![],
docs: vec![],
deprecation_info: v16::DeprecationStatus::NotDeprecated,
};
}
}
// Now, only retain types we care about in the registry:
retain_types(self);
}