mirror of
https://github.com/pezkuwichain/pezkuwi-subxt.git
synced 2026-05-06 13:48:03 +00:00
WIP: first pass adding new storage things to subxt-core
This commit is contained in:
+10
-1
@@ -131,7 +131,7 @@ pub enum MetadataError {
|
||||
}
|
||||
|
||||
/// Something went wrong trying to encode or decode a storage address.
|
||||
#[derive(Clone, Debug, DeriveError)]
|
||||
#[derive(Debug, DeriveError)]
|
||||
#[non_exhaustive]
|
||||
pub enum StorageAddressError {
|
||||
/// Storage lookup does not have the expected number of keys.
|
||||
@@ -169,6 +169,15 @@ pub enum StorageAddressError {
|
||||
/// The invalid hasher that caused this error.
|
||||
hasher: StorageHasher,
|
||||
},
|
||||
/// Cannot obtain storage information from metadata
|
||||
#[error("Cannot obtain storage information from metadata: {0}")]
|
||||
StorageInfoError(frame_decode::storage::StorageInfoError<'static>),
|
||||
/// Cannot decode storage value
|
||||
#[error("Cannot decode storage value: {0}")]
|
||||
StorageValueDecodeError(frame_decode::storage::StorageValueDecodeError<u32>),
|
||||
/// Cannot encode storage key
|
||||
#[error("Cannot encode storage key: {0}")]
|
||||
StorageKeyEncodeError(frame_decode::storage::StorageKeyEncodeError),
|
||||
}
|
||||
|
||||
/// An error that can be encountered when constructing a transaction.
|
||||
|
||||
+55
-170
@@ -4,18 +4,10 @@
|
||||
|
||||
//! Construct addresses to access storage entries with.
|
||||
|
||||
use crate::{
|
||||
dynamic::DecodedValueThunk,
|
||||
error::{Error, MetadataError},
|
||||
metadata::{DecodeWithMetadata, Metadata},
|
||||
utils::Yes,
|
||||
};
|
||||
use scale_decode::DecodeAsType;
|
||||
use derive_where::derive_where;
|
||||
use frame_decode::storage::{IntoEncodableValues, IntoDecodableValues};
|
||||
use alloc::borrow::{Cow, ToOwned};
|
||||
use alloc::string::String;
|
||||
use alloc::borrow::Cow;
|
||||
use alloc::vec::Vec;
|
||||
use frame_decode::storage::{IntoDecodableValues, IntoEncodableValues};
|
||||
use scale_decode::DecodeAsType;
|
||||
|
||||
/// A storage address. Concrete addresses are expected to implement either [`FetchableAddress`]
|
||||
/// or [`IterableAddress`], which extends this to define fetchable and iterable storage keys.
|
||||
@@ -31,19 +23,21 @@ pub trait Address {
|
||||
/// The name of the storage entry.
|
||||
fn entry_name(&self) -> &str;
|
||||
|
||||
/// Encode the suffix of the storage key for this address
|
||||
fn encode_key_suffix(&self, metadata: &Metadata, bytes: &mut Vec<u8>) -> Result<(), Error>;
|
||||
/// Return the input key parts needed to point to this storage entry / entries.
|
||||
fn key_parts(&self) -> impl IntoEncodableValues;
|
||||
|
||||
/// Return a unique hash for this address which can be used to validate it against metadata.
|
||||
fn validation_hash(&self) -> Option<[u8; 32]>;
|
||||
}
|
||||
|
||||
/// This trait represents any storage address which points to a single value we can fetch.
|
||||
pub trait FetchableAddress: Address {
|
||||
/// Does the address have a default value defined for it.
|
||||
/// Does the address have a default value defined for it.
|
||||
/// Set to [`Yes`] to enable APIs which require one.
|
||||
type HasDefaultValue;
|
||||
}
|
||||
|
||||
/// This trait represents any storage address which points to multiple 0 or more values to iterate over.
|
||||
pub trait IterableAddress: Address {
|
||||
/// The storage key values that we'll decode for each value
|
||||
type OutputKeys: IntoDecodableValues;
|
||||
@@ -55,10 +49,10 @@ pub struct StaticFetchableAddress<KeyParts, Value, HasDefaultValue> {
|
||||
entry_name: Cow<'static, str>,
|
||||
key_parts: KeyParts,
|
||||
validation_hash: Option<[u8; 32]>,
|
||||
marker: core::marker::PhantomData<(Value, HasDefaultValue)>
|
||||
marker: core::marker::PhantomData<(Value, HasDefaultValue)>,
|
||||
}
|
||||
|
||||
impl <KeyParts, Value, HasDefaultValue> StaticFetchableAddress<KeyParts, Value, HasDefaultValue> {
|
||||
impl<KeyParts, Value, HasDefaultValue> StaticFetchableAddress<KeyParts, Value, HasDefaultValue> {
|
||||
/// Create a new [`StaticFetchableAddress`] using static strings for the pallet and call name.
|
||||
/// This is only expected to be used from codegen.
|
||||
#[doc(hidden)]
|
||||
@@ -99,22 +93,17 @@ impl <KeyParts, Value, HasDefaultValue> StaticFetchableAddress<KeyParts, Value,
|
||||
}
|
||||
}
|
||||
|
||||
impl <KeyParts, Value, HasDefaultValue> Address for StaticFetchableAddress<KeyParts, Value, HasDefaultValue>
|
||||
impl<KeyParts, Value, HasDefaultValue> Address
|
||||
for StaticFetchableAddress<KeyParts, Value, HasDefaultValue>
|
||||
where
|
||||
KeyParts: IntoEncodableValues,
|
||||
Value: DecodeAsType
|
||||
Value: DecodeAsType,
|
||||
{
|
||||
type KeyParts = KeyParts;
|
||||
type Value = Value;
|
||||
|
||||
fn encode_key_suffix(&self, metadata: &Metadata, bytes: &mut Vec<u8>) -> Result<(), Error> {
|
||||
frame_decode::storage::encode_storage_key_suffix(
|
||||
&self.pallet_name,
|
||||
&self.entry_name,
|
||||
&self.key_parts,
|
||||
metadata.types(),
|
||||
metadata
|
||||
).map_err(Into::into)
|
||||
|
||||
fn key_parts(&self) -> impl IntoEncodableValues {
|
||||
&self.key_parts
|
||||
}
|
||||
|
||||
fn pallet_name(&self) -> &str {
|
||||
@@ -130,10 +119,11 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
impl <KeyParts, Value, HasDefaultValue> FetchableAddress for StaticFetchableAddress<KeyParts, Value, HasDefaultValue>
|
||||
impl<KeyParts, Value, HasDefaultValue> FetchableAddress
|
||||
for StaticFetchableAddress<KeyParts, Value, HasDefaultValue>
|
||||
where
|
||||
KeyParts: IntoEncodableValues,
|
||||
Value: DecodeAsType
|
||||
Value: DecodeAsType,
|
||||
{
|
||||
type HasDefaultValue = HasDefaultValue;
|
||||
}
|
||||
@@ -144,10 +134,12 @@ pub struct StaticIterableAddress<InputKeyParts, OutputKeyParts, Value> {
|
||||
entry_name: Cow<'static, str>,
|
||||
input_key_parts: InputKeyParts,
|
||||
validation_hash: Option<[u8; 32]>,
|
||||
marker: core::marker::PhantomData<(OutputKeyParts, Value)>
|
||||
marker: core::marker::PhantomData<(OutputKeyParts, Value)>,
|
||||
}
|
||||
|
||||
impl <InputKeyParts, OutputKeyParts, Value> StaticIterableAddress<InputKeyParts, OutputKeyParts, Value> {
|
||||
impl<InputKeyParts, OutputKeyParts, Value>
|
||||
StaticIterableAddress<InputKeyParts, OutputKeyParts, Value>
|
||||
{
|
||||
/// Create a new [`StaticIterableAddress`] using static strings for the pallet and call name.
|
||||
/// This is only expected to be used from codegen.
|
||||
#[doc(hidden)]
|
||||
@@ -188,22 +180,17 @@ impl <InputKeyParts, OutputKeyParts, Value> StaticIterableAddress<InputKeyParts,
|
||||
}
|
||||
}
|
||||
|
||||
impl <InputKeyParts, OutputKeyParts, Value> Address for StaticIterableAddress<InputKeyParts, OutputKeyParts, Value>
|
||||
impl<InputKeyParts, OutputKeyParts, Value> Address
|
||||
for StaticIterableAddress<InputKeyParts, OutputKeyParts, Value>
|
||||
where
|
||||
InputKeyParts: IntoEncodableValues,
|
||||
Value: DecodeAsType
|
||||
Value: DecodeAsType,
|
||||
{
|
||||
type KeyParts = InputKeyParts;
|
||||
type Value = Value;
|
||||
|
||||
fn encode_key_suffix(&self, metadata: &Metadata, bytes: &mut Vec<u8>) -> Result<(), Error> {
|
||||
frame_decode::storage::encode_storage_key_suffix(
|
||||
&self.pallet_name,
|
||||
&self.entry_name,
|
||||
&self.input_key_parts,
|
||||
metadata.types(),
|
||||
metadata
|
||||
).map_err(Into::into)
|
||||
|
||||
fn key_parts(&self) -> impl IntoEncodableValues {
|
||||
&self.input_key_parts
|
||||
}
|
||||
|
||||
fn pallet_name(&self) -> &str {
|
||||
@@ -219,140 +206,38 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
impl <InputKeyParts, OutputKeyParts, Value> IterableAddress for StaticIterableAddress<InputKeyParts, OutputKeyParts, Value>
|
||||
impl<InputKeyParts, OutputKeyParts, Value> IterableAddress
|
||||
for StaticIterableAddress<InputKeyParts, OutputKeyParts, Value>
|
||||
where
|
||||
InputKeyParts: IntoEncodableValues,
|
||||
OutputKeyParts: IntoDecodableValues,
|
||||
Value: DecodeAsType
|
||||
Value: DecodeAsType,
|
||||
{
|
||||
type OutputKeys = OutputKeyParts;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/// A concrete storage address. This can be created from static values (ie those generated
|
||||
/// via the `subxt` macro) or dynamic values via [`dynamic`].
|
||||
#[derive_where(Clone, Debug, Eq, Ord, PartialEq, PartialOrd; Keys)]
|
||||
pub struct DefaultAddress<Keys: StorageKey, ReturnTy, Fetchable, Defaultable, Iterable> {
|
||||
pallet_name: Cow<'static, str>,
|
||||
entry_name: Cow<'static, str>,
|
||||
keys: Keys,
|
||||
validation_hash: Option<[u8; 32]>,
|
||||
_marker: core::marker::PhantomData<(ReturnTy, Fetchable, Defaultable, Iterable)>,
|
||||
}
|
||||
|
||||
/// A storage address constructed by the static codegen.
|
||||
pub type StaticAddress<Keys, ReturnTy, Fetchable, Defaultable, Iterable> =
|
||||
DefaultAddress<Keys, ReturnTy, Fetchable, Defaultable, Iterable>;
|
||||
/// A typical storage address constructed at runtime rather than via the `subxt` macro; this
|
||||
/// has no restriction on what it can be used for (since we don't statically know).
|
||||
pub type DynamicAddress<Keys> = DefaultAddress<Keys, DecodedValueThunk, Yes, Yes, Yes>;
|
||||
|
||||
impl<Keys: StorageKey> DynamicAddress<Keys> {
|
||||
/// Creates a new dynamic address. As `Keys` you can use a `Vec<scale_value::Value>`
|
||||
pub fn new(pallet_name: impl Into<String>, entry_name: impl Into<String>, keys: Keys) -> Self {
|
||||
Self {
|
||||
pallet_name: Cow::Owned(pallet_name.into()),
|
||||
entry_name: Cow::Owned(entry_name.into()),
|
||||
keys,
|
||||
validation_hash: None,
|
||||
_marker: core::marker::PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<Keys, ReturnTy, Fetchable, Defaultable, Iterable>
|
||||
DefaultAddress<Keys, ReturnTy, Fetchable, Defaultable, Iterable>
|
||||
where
|
||||
Keys: StorageKey,
|
||||
ReturnTy: DecodeWithMetadata,
|
||||
{
|
||||
/// Create a new [`Address`] using static strings for the pallet and call name.
|
||||
/// This is only expected to be used from codegen.
|
||||
#[doc(hidden)]
|
||||
pub fn new_static(
|
||||
pallet_name: &'static str,
|
||||
entry_name: &'static str,
|
||||
keys: Keys,
|
||||
hash: [u8; 32],
|
||||
) -> Self {
|
||||
Self {
|
||||
pallet_name: Cow::Borrowed(pallet_name),
|
||||
entry_name: Cow::Borrowed(entry_name),
|
||||
keys,
|
||||
validation_hash: Some(hash),
|
||||
_marker: core::marker::PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<Keys, ReturnTy, Fetchable, Defaultable, Iterable>
|
||||
DefaultAddress<Keys, ReturnTy, Fetchable, Defaultable, Iterable>
|
||||
where
|
||||
Keys: StorageKey,
|
||||
ReturnTy: DecodeWithMetadata,
|
||||
{
|
||||
/// Do not validate this storage entry prior to accessing it.
|
||||
pub fn unvalidated(self) -> Self {
|
||||
Self {
|
||||
validation_hash: None,
|
||||
..self
|
||||
}
|
||||
}
|
||||
|
||||
/// Return bytes representing the root of this storage entry (a hash of the pallet and entry name).
|
||||
pub fn to_root_bytes(&self) -> Vec<u8> {
|
||||
super::get_address_root_bytes(self)
|
||||
}
|
||||
}
|
||||
|
||||
impl<Keys, ReturnTy, Fetchable, Defaultable, Iterable> Address
|
||||
for DefaultAddress<Keys, ReturnTy, Fetchable, Defaultable, Iterable>
|
||||
where
|
||||
Keys: StorageKey,
|
||||
ReturnTy: DecodeWithMetadata,
|
||||
{
|
||||
type Target = ReturnTy;
|
||||
type Keys = Keys;
|
||||
type IsFetchable = Fetchable;
|
||||
type IsDefaultable = Defaultable;
|
||||
type IsIterable = Iterable;
|
||||
|
||||
fn pallet_name(&self) -> &str {
|
||||
&self.pallet_name
|
||||
}
|
||||
|
||||
fn entry_name(&self) -> &str {
|
||||
&self.entry_name
|
||||
}
|
||||
|
||||
fn append_entry_bytes(&self, metadata: &Metadata, bytes: &mut Vec<u8>) -> Result<(), Error> {
|
||||
let pallet = metadata.pallet_by_name_err(self.pallet_name())?;
|
||||
let storage = pallet
|
||||
.storage()
|
||||
.ok_or_else(|| MetadataError::StorageNotFoundInPallet(self.pallet_name().to_owned()))?;
|
||||
let entry = storage
|
||||
.entry_by_name(self.entry_name())
|
||||
.ok_or_else(|| MetadataError::StorageEntryNotFound(self.entry_name().to_owned()))?;
|
||||
|
||||
let hashers = StorageHashers::new(entry.entry_type(), metadata.types())?;
|
||||
self.keys
|
||||
.encode_storage_key(bytes, &mut hashers.iter(), metadata.types())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn validation_hash(&self) -> Option<[u8; 32]> {
|
||||
self.validation_hash
|
||||
}
|
||||
}
|
||||
|
||||
/// Construct a new dynamic storage lookup.
|
||||
pub fn dynamic<Keys: StorageKey>(
|
||||
pallet_name: impl Into<String>,
|
||||
entry_name: impl Into<String>,
|
||||
/// Construct a new dynamic storage fetch address.
|
||||
pub fn dynamic_fetch<Keys: IntoEncodableValues>(
|
||||
pallet_name: impl Into<Cow<'static, str>>,
|
||||
entry_name: impl Into<Cow<'static, str>>,
|
||||
storage_entry_keys: Keys,
|
||||
) -> DynamicAddress<Keys> {
|
||||
DynamicAddress::new(pallet_name, entry_name, storage_entry_keys)
|
||||
) -> impl FetchableAddress {
|
||||
StaticFetchableAddress::<Keys, scale_value::Value, ()>::new(
|
||||
pallet_name,
|
||||
entry_name,
|
||||
storage_entry_keys,
|
||||
)
|
||||
}
|
||||
|
||||
/// Construct a new dynamic storage iter address.
|
||||
pub fn dynamic_iter<Keys: IntoEncodableValues>(
|
||||
pallet_name: impl Into<Cow<'static, str>>,
|
||||
entry_name: impl Into<Cow<'static, str>>,
|
||||
storage_entry_keys: Keys,
|
||||
) -> impl IterableAddress {
|
||||
StaticIterableAddress::<Keys, Vec<scale_value::Value>, scale_value::Value>::new(
|
||||
pallet_name,
|
||||
entry_name,
|
||||
storage_entry_keys,
|
||||
)
|
||||
}
|
||||
|
||||
+38
-36
@@ -41,18 +41,16 @@
|
||||
//! println!("Alice's account info: {value:?}");
|
||||
//! ```
|
||||
|
||||
mod storage_key;
|
||||
mod utils;
|
||||
|
||||
pub mod address;
|
||||
|
||||
use crate::{Error, Metadata, error::MetadataError, metadata::DecodeWithMetadata};
|
||||
use crate::{
|
||||
Error, Metadata,
|
||||
error::{MetadataError, StorageAddressError},
|
||||
};
|
||||
use address::Address;
|
||||
use alloc::vec::Vec;
|
||||
|
||||
// This isn't a part of the public API, but expose here because it's useful in Subxt.
|
||||
#[doc(hidden)]
|
||||
pub use utils::lookup_storage_entry_details;
|
||||
use frame_decode::storage::StorageTypeInfo;
|
||||
use scale_decode::IntoVisitor;
|
||||
|
||||
/// When the provided `address` is statically generated via the `#[subxt]` macro, this validates
|
||||
/// that the shape of the storage value is the same as the shape expected by the static address.
|
||||
@@ -84,19 +82,21 @@ pub fn get_address_bytes<Addr: Address>(
|
||||
address: &Addr,
|
||||
metadata: &Metadata,
|
||||
) -> Result<Vec<u8>, Error> {
|
||||
let mut bytes = Vec::new();
|
||||
utils::write_storage_address_root_bytes(address, &mut bytes);
|
||||
address.append_entry_bytes(metadata, &mut bytes)?;
|
||||
Ok(bytes)
|
||||
frame_decode::storage::encode_storage_key(
|
||||
address.pallet_name(),
|
||||
address.entry_name(),
|
||||
&address.key_parts(),
|
||||
&**metadata,
|
||||
metadata.types(),
|
||||
)
|
||||
.map_err(|e| StorageAddressError::StorageKeyEncodeError(e).into())
|
||||
}
|
||||
|
||||
/// Given a storage address and some metadata, this encodes the root of the address (ie the pallet
|
||||
/// and storage entry part) into bytes. If the entry being addressed is inside a map, this returns
|
||||
/// the bytes needed to iterate over all of the entries within it.
|
||||
pub fn get_address_root_bytes<Addr: Address>(address: &Addr) -> Vec<u8> {
|
||||
let mut bytes = Vec::new();
|
||||
utils::write_storage_address_root_bytes(address, &mut bytes);
|
||||
bytes
|
||||
pub fn get_address_root_bytes<Addr: Address>(address: &Addr) -> [u8; 32] {
|
||||
frame_decode::storage::encode_storage_key_prefix(address.pallet_name(), address.entry_name())
|
||||
}
|
||||
|
||||
/// Given some storage value that we've retrieved from a node, the address used to retrieve it, and
|
||||
@@ -106,30 +106,32 @@ pub fn decode_value<Addr: Address>(
|
||||
bytes: &mut &[u8],
|
||||
address: &Addr,
|
||||
metadata: &Metadata,
|
||||
) -> Result<Addr::Target, Error> {
|
||||
let pallet_name = address.pallet_name();
|
||||
let entry_name = address.entry_name();
|
||||
|
||||
let (_, entry_metadata) =
|
||||
utils::lookup_storage_entry_details(pallet_name, entry_name, metadata)?;
|
||||
let value_ty_id = entry_metadata.value_ty();
|
||||
|
||||
let val = Addr::Target::decode_with_metadata(bytes, value_ty_id, metadata)?;
|
||||
Ok(val)
|
||||
) -> Result<Addr::Value, Error> {
|
||||
frame_decode::storage::decode_storage_value(
|
||||
address.pallet_name(),
|
||||
address.entry_name(),
|
||||
bytes,
|
||||
&**metadata,
|
||||
metadata.types(),
|
||||
Addr::Value::into_visitor(),
|
||||
)
|
||||
.map_err(|e| StorageAddressError::StorageValueDecodeError(e).into())
|
||||
}
|
||||
|
||||
/// Return the default value at a given storage address if one is available, or an error otherwise.
|
||||
/// Return the default value at a given storage address if one is available, or None otherwise.
|
||||
pub fn default_value<Addr: Address>(
|
||||
address: &Addr,
|
||||
metadata: &Metadata,
|
||||
) -> Result<Addr::Target, Error> {
|
||||
let pallet_name = address.pallet_name();
|
||||
let entry_name = address.entry_name();
|
||||
) -> Result<Option<Addr::Value>, Error> {
|
||||
let storage_info = metadata
|
||||
.storage_info(address.pallet_name(), address.entry_name())
|
||||
.map_err(|e| StorageAddressError::StorageInfoError(e.into_owned()))?;
|
||||
let value = frame_decode::storage::decode_default_storage_value_with_info(
|
||||
&storage_info,
|
||||
metadata.types(),
|
||||
Addr::Value::into_visitor(),
|
||||
)
|
||||
.map_err(|e| StorageAddressError::StorageValueDecodeError(e))?;
|
||||
|
||||
let (_, entry_metadata) =
|
||||
utils::lookup_storage_entry_details(pallet_name, entry_name, metadata)?;
|
||||
let value_ty_id = entry_metadata.value_ty();
|
||||
let default_bytes = entry_metadata.default_value();
|
||||
let val = Addr::Target::decode_with_metadata(&mut &*default_bytes, value_ty_id, metadata)?;
|
||||
Ok(val)
|
||||
Ok(value)
|
||||
}
|
||||
|
||||
@@ -1,471 +0,0 @@
|
||||
// Copyright 2019-2024 Parity Technologies (UK) Ltd.
|
||||
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
|
||||
// see LICENSE for license details.
|
||||
|
||||
use super::utils::hash_bytes;
|
||||
use crate::error::{Error, MetadataError, StorageAddressError};
|
||||
use alloc::vec;
|
||||
use alloc::vec::Vec;
|
||||
use scale_decode::{DecodeAsType, visitor::IgnoreVisitor};
|
||||
use scale_encode::EncodeAsType;
|
||||
use scale_info::{PortableRegistry, TypeDef};
|
||||
use scale_value::Value;
|
||||
use subxt_metadata::{StorageEntryType, StorageHasher};
|
||||
|
||||
/// A collection of storage hashers paired with the type ids of the types they should hash.
|
||||
/// Can be created for each storage entry in the metadata via [`StorageHashers::new()`].
|
||||
#[derive(Debug)]
|
||||
pub struct StorageHashers {
|
||||
hashers_and_ty_ids: Vec<(StorageHasher, u32)>,
|
||||
}
|
||||
|
||||
impl StorageHashers {
|
||||
/// Creates new [`StorageHashers`] from a storage entry. Looks at the [`StorageEntryType`] and
|
||||
/// assigns a hasher to each type id that makes up the key.
|
||||
pub fn new(storage_entry: &StorageEntryType, types: &PortableRegistry) -> Result<Self, Error> {
|
||||
let mut hashers_and_ty_ids = vec![];
|
||||
if let StorageEntryType::Map {
|
||||
hashers, key_ty, ..
|
||||
} = storage_entry
|
||||
{
|
||||
let ty = types
|
||||
.resolve(*key_ty)
|
||||
.ok_or(MetadataError::TypeNotFound(*key_ty))?;
|
||||
|
||||
if hashers.len() == 1 {
|
||||
// If there's exactly 1 hasher, then we have a plain StorageMap. We can't
|
||||
// break the key down (even if it's a tuple) because the hasher applies to
|
||||
// the whole key.
|
||||
hashers_and_ty_ids = vec![(hashers[0], *key_ty)];
|
||||
} else {
|
||||
// If there are multiple hashers, then we have a StorageDoubleMap or StorageNMap.
|
||||
// We expect the key type to be tuple, and we will return a MapEntryKey for each
|
||||
// key in the tuple.
|
||||
let hasher_count = hashers.len();
|
||||
let tuple = match &ty.type_def {
|
||||
TypeDef::Tuple(tuple) => tuple,
|
||||
_ => {
|
||||
return Err(StorageAddressError::WrongNumberOfHashers {
|
||||
hashers: hasher_count,
|
||||
fields: 1,
|
||||
}
|
||||
.into());
|
||||
}
|
||||
};
|
||||
|
||||
// We should have the same number of hashers and keys.
|
||||
let key_count = tuple.fields.len();
|
||||
if hasher_count != key_count {
|
||||
return Err(StorageAddressError::WrongNumberOfHashers {
|
||||
hashers: hasher_count,
|
||||
fields: key_count,
|
||||
}
|
||||
.into());
|
||||
}
|
||||
|
||||
// Collect them together.
|
||||
hashers_and_ty_ids = tuple
|
||||
.fields
|
||||
.iter()
|
||||
.zip(hashers)
|
||||
.map(|(field, hasher)| (*hasher, field.id))
|
||||
.collect();
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Self { hashers_and_ty_ids })
|
||||
}
|
||||
|
||||
/// Creates an iterator over the storage hashers and type ids.
|
||||
pub fn iter(&self) -> StorageHashersIter<'_> {
|
||||
StorageHashersIter {
|
||||
hashers: self,
|
||||
idx: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// An iterator over all type ids of the key and the respective hashers.
|
||||
/// See [`StorageHashers::iter()`].
|
||||
#[derive(Debug)]
|
||||
pub struct StorageHashersIter<'a> {
|
||||
hashers: &'a StorageHashers,
|
||||
idx: usize,
|
||||
}
|
||||
|
||||
impl StorageHashersIter<'_> {
|
||||
fn next_or_err(&mut self) -> Result<(StorageHasher, u32), Error> {
|
||||
self.next().ok_or_else(|| {
|
||||
StorageAddressError::TooManyKeys {
|
||||
expected: self.hashers.hashers_and_ty_ids.len(),
|
||||
}
|
||||
.into()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Iterator for StorageHashersIter<'_> {
|
||||
type Item = (StorageHasher, u32);
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
let item = self.hashers.hashers_and_ty_ids.get(self.idx).copied()?;
|
||||
self.idx += 1;
|
||||
Some(item)
|
||||
}
|
||||
}
|
||||
|
||||
impl ExactSizeIterator for StorageHashersIter<'_> {
|
||||
fn len(&self) -> usize {
|
||||
self.hashers.hashers_and_ty_ids.len() - self.idx
|
||||
}
|
||||
}
|
||||
|
||||
/// This trait should be implemented by anything that can be used as one or multiple storage keys.
|
||||
pub trait StorageKey {
|
||||
/// Encodes the storage key into some bytes
|
||||
fn encode_storage_key(
|
||||
&self,
|
||||
bytes: &mut Vec<u8>,
|
||||
hashers: &mut StorageHashersIter,
|
||||
types: &PortableRegistry,
|
||||
) -> Result<(), Error>;
|
||||
|
||||
/// Attempts to decode the StorageKey given some bytes and a set of hashers and type IDs that they are meant to represent.
|
||||
/// The bytes passed to `decode` should start with:
|
||||
/// - 1. some fixed size hash (for all hashers except `Identity`)
|
||||
/// - 2. the plain key value itself (for `Identity`, `Blake2_128Concat` and `Twox64Concat` hashers)
|
||||
fn decode_storage_key(
|
||||
bytes: &mut &[u8],
|
||||
hashers: &mut StorageHashersIter,
|
||||
types: &PortableRegistry,
|
||||
) -> Result<Self, Error>
|
||||
where
|
||||
Self: Sized + 'static;
|
||||
}
|
||||
|
||||
/// Implement `StorageKey` for `()` which can be used for keyless storage entries,
|
||||
/// or to otherwise just ignore some entry.
|
||||
impl StorageKey for () {
|
||||
fn encode_storage_key(
|
||||
&self,
|
||||
_bytes: &mut Vec<u8>,
|
||||
hashers: &mut StorageHashersIter,
|
||||
_types: &PortableRegistry,
|
||||
) -> Result<(), Error> {
|
||||
_ = hashers.next_or_err();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn decode_storage_key(
|
||||
bytes: &mut &[u8],
|
||||
hashers: &mut StorageHashersIter,
|
||||
types: &PortableRegistry,
|
||||
) -> Result<Self, Error> {
|
||||
let (hasher, ty_id) = match hashers.next_or_err() {
|
||||
Ok((hasher, ty_id)) => (hasher, ty_id),
|
||||
Err(_) if bytes.is_empty() => return Ok(()),
|
||||
Err(err) => return Err(err),
|
||||
};
|
||||
consume_hash_returning_key_bytes(bytes, hasher, ty_id, types)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// A storage key used as part of the static codegen.
|
||||
#[derive(Clone, Debug, PartialOrd, PartialEq, Eq)]
|
||||
pub struct StaticStorageKey<K> {
|
||||
key: K,
|
||||
}
|
||||
|
||||
impl<K> StaticStorageKey<K> {
|
||||
/// Creates a new static storage key.
|
||||
pub fn new(key: K) -> Self {
|
||||
StaticStorageKey { key }
|
||||
}
|
||||
}
|
||||
|
||||
impl<K: Clone> StaticStorageKey<K> {
|
||||
/// Returns the decoded storage key.
|
||||
pub fn into_key(self) -> K {
|
||||
self.key
|
||||
}
|
||||
}
|
||||
|
||||
impl<K: EncodeAsType + DecodeAsType> StorageKey for StaticStorageKey<K> {
|
||||
fn encode_storage_key(
|
||||
&self,
|
||||
bytes: &mut Vec<u8>,
|
||||
hashers: &mut StorageHashersIter,
|
||||
types: &PortableRegistry,
|
||||
) -> Result<(), Error> {
|
||||
let (hasher, ty_id) = hashers.next_or_err()?;
|
||||
let encoded_value = self.key.encode_as_type(ty_id, types)?;
|
||||
hash_bytes(&encoded_value, hasher, bytes);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn decode_storage_key(
|
||||
bytes: &mut &[u8],
|
||||
hashers: &mut StorageHashersIter,
|
||||
types: &PortableRegistry,
|
||||
) -> Result<Self, Error>
|
||||
where
|
||||
Self: Sized + 'static,
|
||||
{
|
||||
let (hasher, ty_id) = hashers.next_or_err()?;
|
||||
let key_bytes = consume_hash_returning_key_bytes(bytes, hasher, ty_id, types)?;
|
||||
|
||||
// if the hasher had no key appended, we can't decode it into a `StaticStorageKey`.
|
||||
let Some(key_bytes) = key_bytes else {
|
||||
return Err(StorageAddressError::HasherCannotReconstructKey { ty_id, hasher }.into());
|
||||
};
|
||||
|
||||
// Decode and return the key.
|
||||
let key = K::decode_as_type(&mut &*key_bytes, ty_id, types)?;
|
||||
let key = StaticStorageKey { key };
|
||||
Ok(key)
|
||||
}
|
||||
}
|
||||
|
||||
impl StorageKey for Vec<scale_value::Value> {
|
||||
fn encode_storage_key(
|
||||
&self,
|
||||
bytes: &mut Vec<u8>,
|
||||
hashers: &mut StorageHashersIter,
|
||||
types: &PortableRegistry,
|
||||
) -> Result<(), Error> {
|
||||
for value in self.iter() {
|
||||
let (hasher, ty_id) = hashers.next_or_err()?;
|
||||
let encoded_value = value.encode_as_type(ty_id, types)?;
|
||||
hash_bytes(&encoded_value, hasher, bytes);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn decode_storage_key(
|
||||
bytes: &mut &[u8],
|
||||
hashers: &mut StorageHashersIter,
|
||||
types: &PortableRegistry,
|
||||
) -> Result<Self, Error>
|
||||
where
|
||||
Self: Sized + 'static,
|
||||
{
|
||||
let mut result: Vec<scale_value::Value> = vec![];
|
||||
for (hasher, ty_id) in hashers.by_ref() {
|
||||
match consume_hash_returning_key_bytes(bytes, hasher, ty_id, types)? {
|
||||
Some(value_bytes) => {
|
||||
let value =
|
||||
scale_value::scale::decode_as_type(&mut &*value_bytes, ty_id, types)?;
|
||||
|
||||
result.push(value.remove_context());
|
||||
}
|
||||
None => {
|
||||
result.push(Value::unnamed_composite([]));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We've consumed all of the hashers, so we expect to also consume all of the bytes:
|
||||
if !bytes.is_empty() {
|
||||
return Err(StorageAddressError::TooManyBytes.into());
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
}
|
||||
|
||||
// Skip over the hash bytes (including any key at the end), returning bytes
|
||||
// representing the key if one exists, or None if the hasher has no key appended.
|
||||
fn consume_hash_returning_key_bytes<'a>(
|
||||
bytes: &mut &'a [u8],
|
||||
hasher: StorageHasher,
|
||||
ty_id: u32,
|
||||
types: &PortableRegistry,
|
||||
) -> Result<Option<&'a [u8]>, Error> {
|
||||
// Strip the bytes off for the actual hash, consuming them.
|
||||
let bytes_to_strip = hasher.len_excluding_key();
|
||||
if bytes.len() < bytes_to_strip {
|
||||
return Err(StorageAddressError::NotEnoughBytes.into());
|
||||
}
|
||||
*bytes = &bytes[bytes_to_strip..];
|
||||
|
||||
// Now, find the bytes representing the key, consuming them.
|
||||
let before_key = *bytes;
|
||||
if hasher.ends_with_key() {
|
||||
scale_decode::visitor::decode_with_visitor(
|
||||
bytes,
|
||||
ty_id,
|
||||
types,
|
||||
IgnoreVisitor::<PortableRegistry>::new(),
|
||||
)
|
||||
.map_err(|err| Error::Decode(err.into()))?;
|
||||
// Return the key bytes, having advanced the input cursor past them.
|
||||
let key_bytes = &before_key[..before_key.len() - bytes.len()];
|
||||
|
||||
Ok(Some(key_bytes))
|
||||
} else {
|
||||
// There are no key bytes, so return None.
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
/// Generates StorageKey implementations for tuples
|
||||
macro_rules! impl_tuples {
|
||||
($($ty:ident $n:tt),+) => {{
|
||||
impl<$($ty: StorageKey),+> StorageKey for ($( $ty ),+) {
|
||||
fn encode_storage_key(
|
||||
&self,
|
||||
bytes: &mut Vec<u8>,
|
||||
hashers: &mut StorageHashersIter,
|
||||
types: &PortableRegistry,
|
||||
) -> Result<(), Error> {
|
||||
$( self.$n.encode_storage_key(bytes, hashers, types)?; )+
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn decode_storage_key(
|
||||
bytes: &mut &[u8],
|
||||
hashers: &mut StorageHashersIter,
|
||||
types: &PortableRegistry,
|
||||
) -> Result<Self, Error>
|
||||
where
|
||||
Self: Sized + 'static,
|
||||
{
|
||||
Ok( ( $( $ty::decode_storage_key(bytes, hashers, types)?, )+ ) )
|
||||
}
|
||||
}
|
||||
}};
|
||||
}
|
||||
|
||||
#[rustfmt::skip]
|
||||
const _: () = {
|
||||
impl_tuples!(A 0, B 1);
|
||||
impl_tuples!(A 0, B 1, C 2);
|
||||
impl_tuples!(A 0, B 1, C 2, D 3);
|
||||
impl_tuples!(A 0, B 1, C 2, D 3, E 4);
|
||||
impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5);
|
||||
impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6);
|
||||
impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7);
|
||||
};
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use codec::Encode;
|
||||
use scale_info::{PortableRegistry, Registry, TypeInfo, meta_type};
|
||||
use subxt_metadata::StorageHasher;
|
||||
|
||||
use crate::utils::Era;
|
||||
|
||||
use alloc::string::String;
|
||||
use alloc::vec;
|
||||
use alloc::vec::Vec;
|
||||
|
||||
use super::{StaticStorageKey, StorageKey};
|
||||
|
||||
struct KeyBuilder {
|
||||
registry: Registry,
|
||||
bytes: Vec<u8>,
|
||||
hashers_and_ty_ids: Vec<(StorageHasher, u32)>,
|
||||
}
|
||||
|
||||
impl KeyBuilder {
|
||||
fn new() -> KeyBuilder {
|
||||
KeyBuilder {
|
||||
registry: Registry::new(),
|
||||
bytes: vec![],
|
||||
hashers_and_ty_ids: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
fn add<T: TypeInfo + Encode + 'static>(mut self, value: T, hasher: StorageHasher) -> Self {
|
||||
let id = self.registry.register_type(&meta_type::<T>()).id;
|
||||
|
||||
self.hashers_and_ty_ids.push((hasher, id));
|
||||
for _i in 0..hasher.len_excluding_key() {
|
||||
self.bytes.push(0);
|
||||
}
|
||||
value.encode_to(&mut self.bytes);
|
||||
self
|
||||
}
|
||||
|
||||
fn build(self) -> (PortableRegistry, Vec<u8>, Vec<(StorageHasher, u32)>) {
|
||||
(self.registry.into(), self.bytes, self.hashers_and_ty_ids)
|
||||
}
|
||||
}
|
||||
|
||||
/// Exhaustively checks that a four-segment storage key encoded with every
/// combination of supported hashers decodes correctly via three differently
/// nested tuple key types.
#[test]
fn storage_key_decoding_fuzz() {
    // Any hasher may be used for the first segment: its value is `()` and is
    // never decoded back out.
    let hashers = [
        StorageHasher::Blake2_128,
        StorageHasher::Blake2_128Concat,
        StorageHasher::Blake2_256,
        StorageHasher::Identity,
        StorageHasher::Twox128,
        StorageHasher::Twox256,
        StorageHasher::Twox64Concat,
    ];

    // Segments whose values we decode back must use hashers that keep the
    // original key bytes after the hash output.
    let key_preserving_hashers = [
        StorageHasher::Blake2_128Concat,
        StorageHasher::Identity,
        StorageHasher::Twox64Concat,
    ];

    // Three tuple shapes describing the same four key segments with different
    // nesting; all three should decode the same underlying bytes.
    type T4A = (
        (),
        StaticStorageKey<u32>,
        StaticStorageKey<String>,
        StaticStorageKey<Era>,
    );
    type T4B = (
        (),
        (StaticStorageKey<u32>, StaticStorageKey<String>),
        StaticStorageKey<Era>,
    );
    type T4C = (
        ((), StaticStorageKey<u32>),
        (StaticStorageKey<String>, StaticStorageKey<Era>),
    );

    let era = Era::Immortal;
    for h0 in hashers {
        for h1 in key_preserving_hashers {
            for h2 in key_preserving_hashers {
                for h3 in key_preserving_hashers {
                    // Build the key bytes for ((), 13u32, "Hello", era) using
                    // this combination of hashers.
                    let (types, bytes, hashers_and_ty_ids) = KeyBuilder::new()
                        .add((), h0)
                        .add(13u32, h1)
                        .add("Hello", h2)
                        .add(era, h3)
                        .build();

                    let hashers = super::StorageHashers { hashers_and_ty_ids };
                    let keys_a =
                        T4A::decode_storage_key(&mut &bytes[..], &mut hashers.iter(), &types)
                            .unwrap();

                    let keys_b =
                        T4B::decode_storage_key(&mut &bytes[..], &mut hashers.iter(), &types)
                            .unwrap();

                    let keys_c =
                        T4C::decode_storage_key(&mut &bytes[..], &mut hashers.iter(), &types)
                            .unwrap();

                    // Every nesting must recover the same segment values.
                    assert_eq!(keys_a.1.into_key(), 13);
                    assert_eq!(keys_b.1.0.into_key(), 13);
                    assert_eq!(keys_c.0.1.into_key(), 13);

                    assert_eq!(keys_a.2.into_key(), "Hello");
                    assert_eq!(keys_b.1.1.into_key(), "Hello");
                    assert_eq!(keys_c.1.0.into_key(), "Hello");
                    assert_eq!(keys_a.3.into_key(), era);
                    assert_eq!(keys_b.2.into_key(), era);
                    assert_eq!(keys_c.1.1.into_key(), era);
                }
            }
        }
    }
}
|
||||
@@ -1,56 +0,0 @@
|
||||
// Copyright 2019-2024 Parity Technologies (UK) Ltd.
|
||||
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
|
||||
// see LICENSE for license details.
|
||||
|
||||
//! These utility methods complement the [`Address`] trait, but they are not
//! things that should ever be overridden, and so they don't exist on the
//! trait itself.
|
||||
|
||||
use super::address::Address;
|
||||
use crate::error::{Error, MetadataError};
|
||||
use crate::metadata::Metadata;
|
||||
use alloc::borrow::ToOwned;
|
||||
use alloc::vec::Vec;
|
||||
use subxt_metadata::{PalletMetadata, StorageEntryMetadata, StorageHasher};
|
||||
|
||||
/// Return the root of a given [`Address`]: hash the pallet name and entry name
|
||||
/// and append those bytes to the output.
|
||||
pub fn write_storage_address_root_bytes<Addr: Address>(addr: &Addr, out: &mut Vec<u8>) {
|
||||
out.extend(sp_crypto_hashing::twox_128(addr.pallet_name().as_bytes()));
|
||||
out.extend(sp_crypto_hashing::twox_128(addr.entry_name().as_bytes()));
|
||||
}
|
||||
|
||||
/// Take some SCALE encoded bytes and a [`StorageHasher`] and hash the bytes accordingly.
|
||||
pub fn hash_bytes(input: &[u8], hasher: StorageHasher, bytes: &mut Vec<u8>) {
|
||||
match hasher {
|
||||
StorageHasher::Identity => bytes.extend(input),
|
||||
StorageHasher::Blake2_128 => bytes.extend(sp_crypto_hashing::blake2_128(input)),
|
||||
StorageHasher::Blake2_128Concat => {
|
||||
bytes.extend(sp_crypto_hashing::blake2_128(input));
|
||||
bytes.extend(input);
|
||||
}
|
||||
StorageHasher::Blake2_256 => bytes.extend(sp_crypto_hashing::blake2_256(input)),
|
||||
StorageHasher::Twox128 => bytes.extend(sp_crypto_hashing::twox_128(input)),
|
||||
StorageHasher::Twox256 => bytes.extend(sp_crypto_hashing::twox_256(input)),
|
||||
StorageHasher::Twox64Concat => {
|
||||
bytes.extend(sp_crypto_hashing::twox_64(input));
|
||||
bytes.extend(input);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Return details about the given storage entry.
|
||||
pub fn lookup_storage_entry_details<'a>(
|
||||
pallet_name: &str,
|
||||
entry_name: &str,
|
||||
metadata: &'a Metadata,
|
||||
) -> Result<(PalletMetadata<'a>, &'a StorageEntryMetadata), Error> {
|
||||
let pallet_metadata = metadata.pallet_by_name_err(pallet_name)?;
|
||||
let storage_metadata = pallet_metadata
|
||||
.storage()
|
||||
.ok_or_else(|| MetadataError::StorageNotFoundInPallet(pallet_name.to_owned()))?;
|
||||
let storage_entry = storage_metadata
|
||||
.entry_by_name(entry_name)
|
||||
.ok_or_else(|| MetadataError::StorageEntryNotFound(entry_name.to_owned()))?;
|
||||
Ok((pallet_metadata, storage_entry))
|
||||
}
|
||||
@@ -250,7 +250,7 @@ where
|
||||
let pallet_name = &*self.pallet_name;
|
||||
let storage_name = &*self.storage_name;
|
||||
|
||||
frame_decode::storage::encode_prefix(pallet_name, storage_name)
|
||||
frame_decode::storage::encode_storage_key_prefix(pallet_name, storage_name)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user