Use scale-encode and scale-decode to encode and decode based on metadata (#842)

* WIP EncodeAsType and DecodeAsType

* remove silly cli experiment code

* Get things finally compiling with EncodeAsType and DecodeAsType

* update codegen test and WrapperKeepOpaque proper impl (in case it shows up in codegen)

* fix tests

* accommodate scale-value changes

* starting to migrate to EncodeAsType/DecodeAsType

* static event decoding and tx encoding to use DecodeAsFields/EncodeAsFields

* some tidy up and add decode(skip) attrs where needed

* fix root event decoding

* #[codec(skip)] will do, and combine map_key stuff into storage_address since it's all specific to that

* fmt and clippy

* update Cargo.lock

* remove patched scale-encode

* bump scale-encode to 0.1 and remove unused dep in testing crate

* update deps and use released scale-decode

* update scale-value to latest to remove git branch

* Apply suggestions from code review

Co-authored-by: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>

* remove sorting in derives/attr generation; spit them out in order given

* re-add derive sorting; it's a hashmap

* StaticTxPayload and DynamicTxPayload rolled into single Payload struct

* StaticStorageAddress and DynamicStorageAddress into single Address struct

* Fix storage address byte retrieval

* StaticConstantAddress and DynamicConstantAddress => Address

* Simplify storage codegen to fix test

* Add comments

* Alias to RuntimeEvent rather than making another, and prep for substituting call type

* remove unnecessary clone

* Fix docs and failing UI test

* root_bytes -> to_root_bytes

* document error case in StorageClient::address_bytes()

---------

Co-authored-by: Alexandru Vasile <60601340+lexnv@users.noreply.github.com>
This commit is contained in:
James Wilson
2023-03-21 15:31:13 +00:00
committed by GitHub
parent c9527abaa8
commit c63ff6ec6d
50 changed files with 9965 additions and 6262 deletions
+10 -16
View File
@@ -6,7 +6,6 @@
mod storage_address;
mod storage_client;
mod storage_map_key;
mod storage_type;
pub mod utils;
@@ -24,19 +23,14 @@ pub use crate::rpc::types::StorageKey;
/// Types representing an address which describes where a storage
/// entry lives and how to properly decode it.
pub mod address {
pub use super::{
storage_address::{
dynamic,
dynamic_root,
DynamicStorageAddress,
StaticStorageAddress,
StorageAddress,
Yes,
},
storage_map_key::{
StorageHasher,
StorageMapKey,
},
pub use super::storage_address::{
dynamic,
dynamic_root,
Address,
DynamicAddress,
StaticStorageMapKey,
StorageAddress,
Yes,
};
}
@@ -45,7 +39,7 @@ pub mod address {
pub use storage_address::{
dynamic,
dynamic_root,
DynamicStorageAddress,
StaticStorageAddress,
Address,
DynamicAddress,
StorageAddress,
};
+118 -112
View File
@@ -2,7 +2,6 @@
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
// see LICENSE for license details.
use super::storage_map_key::StorageMapKey;
use crate::{
dynamic::{
DecodedValueThunk,
@@ -18,13 +17,13 @@ use crate::{
Metadata,
},
};
use frame_metadata::StorageEntryType;
use frame_metadata::{
StorageEntryType,
StorageHasher,
};
use scale_info::TypeDef;
use std::borrow::Cow;
// We use this type a bunch, so export it from here.
pub use frame_metadata::StorageHasher;
/// This represents a storage address. Anything implementing this trait
/// can be used to fetch and iterate over storage entries.
pub trait StorageAddress {
@@ -66,34 +65,55 @@ pub trait StorageAddress {
/// fetched and returned with a default value in the type system.
pub struct Yes;
/// This represents a statically generated storage lookup address.
pub struct StaticStorageAddress<ReturnTy, Fetchable, Defaultable, Iterable> {
pallet_name: &'static str,
entry_name: &'static str,
// How to access the specific value at that storage address.
storage_entry_keys: Vec<StorageMapKey>,
// Hash provided from static code for validation.
/// A concrete storage address. This can be created from static values (ie those generated
/// via the `subxt` macro) or dynamic values via [`dynamic`] and [`dynamic_root`].
pub struct Address<StorageKey, ReturnTy, Fetchable, Defaultable, Iterable> {
pallet_name: Cow<'static, str>,
entry_name: Cow<'static, str>,
storage_entry_keys: Vec<StorageKey>,
validation_hash: Option<[u8; 32]>,
_marker: std::marker::PhantomData<(ReturnTy, Fetchable, Defaultable, Iterable)>,
}
impl<ReturnTy, Fetchable, Defaultable, Iterable>
StaticStorageAddress<ReturnTy, Fetchable, Defaultable, Iterable>
/// A typical storage address constructed at runtime rather than via the `subxt` macro; this
/// has no restriction on what it can be used for (since we don't statically know).
pub type DynamicAddress<StorageKey> =
Address<StorageKey, DecodedValueThunk, Yes, Yes, Yes>;
impl<StorageKey, ReturnTy, Fetchable, Defaultable, Iterable>
Address<StorageKey, ReturnTy, Fetchable, Defaultable, Iterable>
where
StorageKey: EncodeWithMetadata,
ReturnTy: DecodeWithMetadata,
{
/// Create a new [`StaticStorageAddress`] that will be validated
/// against node metadata using the hash given.
/// Create a new [`Address`] to use to access a storage entry.
pub fn new(
pallet_name: impl Into<String>,
entry_name: impl Into<String>,
storage_entry_keys: Vec<StorageKey>,
) -> Self {
Self {
pallet_name: Cow::Owned(pallet_name.into()),
entry_name: Cow::Owned(entry_name.into()),
storage_entry_keys: storage_entry_keys.into_iter().collect(),
validation_hash: None,
_marker: std::marker::PhantomData,
}
}
/// Create a new [`Address`] using static strings for the pallet and call name.
/// This is only expected to be used from codegen.
#[doc(hidden)]
pub fn new_static(
pallet_name: &'static str,
entry_name: &'static str,
storage_entry_keys: Vec<StorageMapKey>,
storage_entry_keys: Vec<StorageKey>,
hash: [u8; 32],
) -> Self {
Self {
pallet_name,
entry_name,
storage_entry_keys,
pallet_name: Cow::Borrowed(pallet_name),
entry_name: Cow::Borrowed(entry_name),
storage_entry_keys: storage_entry_keys.into_iter().collect(),
validation_hash: Some(hash),
_marker: std::marker::PhantomData,
}
@@ -107,100 +127,24 @@ where
}
}
/// Return bytes representing this storage entry.
pub fn to_bytes(&self) -> Vec<u8> {
let mut bytes = Vec::new();
super::utils::write_storage_address_root_bytes(self, &mut bytes);
for entry in &self.storage_entry_keys {
entry.to_bytes(&mut bytes);
}
bytes
}
/// Return bytes representing the root of this storage entry (ie a hash of
/// the pallet and entry name).
/// the pallet and entry name). Use [`crate::storage::StorageClient::address_bytes()`]
/// to obtain the bytes representing the entire address.
pub fn to_root_bytes(&self) -> Vec<u8> {
super::utils::storage_address_root_bytes(self)
}
}
impl<ReturnTy, Fetchable, Defaultable, Iterable> StorageAddress
for StaticStorageAddress<ReturnTy, Fetchable, Defaultable, Iterable>
impl<StorageKey, ReturnTy, Fetchable, Defaultable, Iterable> StorageAddress
for Address<StorageKey, ReturnTy, Fetchable, Defaultable, Iterable>
where
StorageKey: EncodeWithMetadata,
ReturnTy: DecodeWithMetadata,
{
type Target = ReturnTy;
type IsFetchable = Fetchable;
type IsDefaultable = Defaultable;
type IsIterable = Iterable;
type IsFetchable = Fetchable;
fn pallet_name(&self) -> &str {
self.pallet_name
}
fn entry_name(&self) -> &str {
self.entry_name
}
fn append_entry_bytes(
&self,
_metadata: &Metadata,
bytes: &mut Vec<u8>,
) -> Result<(), Error> {
for entry in &self.storage_entry_keys {
entry.to_bytes(bytes);
}
Ok(())
}
fn validation_hash(&self) -> Option<[u8; 32]> {
self.validation_hash
}
}
/// This represents a dynamically generated storage address.
pub struct DynamicStorageAddress<'a, Encodable> {
pallet_name: Cow<'a, str>,
entry_name: Cow<'a, str>,
storage_entry_keys: Vec<Encodable>,
}
/// Construct a new dynamic storage lookup to the root of some entry.
pub fn dynamic_root<'a>(
pallet_name: impl Into<Cow<'a, str>>,
entry_name: impl Into<Cow<'a, str>>,
) -> DynamicStorageAddress<'a, Value> {
DynamicStorageAddress {
pallet_name: pallet_name.into(),
entry_name: entry_name.into(),
storage_entry_keys: vec![],
}
}
/// Construct a new dynamic storage lookup.
pub fn dynamic<'a, Encodable: EncodeWithMetadata>(
pallet_name: impl Into<Cow<'a, str>>,
entry_name: impl Into<Cow<'a, str>>,
storage_entry_keys: Vec<Encodable>,
) -> DynamicStorageAddress<'a, Encodable> {
DynamicStorageAddress {
pallet_name: pallet_name.into(),
entry_name: entry_name.into(),
storage_entry_keys,
}
}
impl<'a, Encodable> StorageAddress for DynamicStorageAddress<'a, Encodable>
where
Encodable: EncodeWithMetadata,
{
type Target = DecodedValueThunk;
// For dynamic types, we have no static guarantees about any of
// this stuff, so we just allow it and let it fail at runtime:
type IsFetchable = Yes;
type IsDefaultable = Yes;
type IsIterable = Yes;
fn pallet_name(&self) -> &str {
&self.pallet_name
@@ -239,11 +183,9 @@ where
// If the key is not a tuple, encode a single value to the key type.
let type_ids = match ty.type_def() {
TypeDef::Tuple(tuple) => {
tuple.fields().iter().map(|f| f.id()).collect()
}
_other => {
vec![key.id()]
either::Either::Left(tuple.fields().iter().map(|f| f.id()))
}
_other => either::Either::Right(std::iter::once(key.id())),
};
if type_ids.len() != self.storage_entry_keys.len() {
@@ -257,19 +199,19 @@ where
if hashers.len() == 1 {
// One hasher; hash a tuple of all SCALE encoded bytes with the one hash function.
let mut input = Vec::new();
for (key, type_id) in self.storage_entry_keys.iter().zip(type_ids) {
let iter = self.storage_entry_keys.iter().zip(type_ids);
for (key, type_id) in iter {
key.encode_with_metadata(type_id, metadata, &mut input)?;
}
super::storage_map_key::hash_bytes(&input, &hashers[0], bytes);
hash_bytes(&input, &hashers[0], bytes);
Ok(())
} else if hashers.len() == type_ids.len() {
let iter = self.storage_entry_keys.iter().zip(type_ids).zip(hashers);
// A hasher per field; encode and hash each field independently.
for ((key, type_id), hasher) in
self.storage_entry_keys.iter().zip(type_ids).zip(hashers)
{
for ((key, type_id), hasher) in iter {
let mut input = Vec::new();
key.encode_with_metadata(type_id, metadata, &mut input)?;
super::storage_map_key::hash_bytes(&input, hasher, bytes);
hash_bytes(&input, hasher, bytes);
}
Ok(())
} else {
@@ -283,4 +225,68 @@ where
}
}
}
fn validation_hash(&self) -> Option<[u8; 32]> {
self.validation_hash
}
}
/// A static storage key; this is some pre-encoded bytes
/// likely provided by the generated interface.
pub struct StaticStorageMapKey(pub Vec<u8>);
impl StaticStorageMapKey {
/// Create a new [`StaticStorageMapKey`] by pre-encoding static data.
pub fn new<Encodable: codec::Encode>(value: Encodable) -> StaticStorageMapKey {
Self(value.encode())
}
}
impl EncodeWithMetadata for StaticStorageMapKey {
fn encode_with_metadata(
&self,
_type_id: u32,
_metadata: &Metadata,
bytes: &mut Vec<u8>,
) -> Result<(), Error> {
// We just use the already-encoded bytes for a static storage key:
bytes.extend(&self.0);
Ok(())
}
}
/// Construct a new dynamic storage lookup to the root of some entry.
pub fn dynamic_root(
pallet_name: impl Into<String>,
entry_name: impl Into<String>,
) -> DynamicAddress<Value> {
DynamicAddress::new(pallet_name, entry_name, vec![])
}
/// Construct a new dynamic storage lookup.
pub fn dynamic<StorageKey: EncodeWithMetadata>(
pallet_name: impl Into<String>,
entry_name: impl Into<String>,
storage_entry_keys: Vec<StorageKey>,
) -> DynamicAddress<StorageKey> {
DynamicAddress::new(pallet_name, entry_name, storage_entry_keys)
}
/// Take some SCALE encoded bytes and a [`StorageHasher`] and hash the bytes accordingly.
fn hash_bytes(input: &[u8], hasher: &StorageHasher, bytes: &mut Vec<u8>) {
match hasher {
StorageHasher::Identity => bytes.extend(input),
StorageHasher::Blake2_128 => bytes.extend(sp_core_hashing::blake2_128(input)),
StorageHasher::Blake2_128Concat => {
bytes.extend(sp_core_hashing::blake2_128(input));
bytes.extend(input);
}
StorageHasher::Blake2_256 => bytes.extend(sp_core_hashing::blake2_256(input)),
StorageHasher::Twox128 => bytes.extend(sp_core_hashing::twox_128(input)),
StorageHasher::Twox256 => bytes.extend(sp_core_hashing::twox_256(input)),
StorageHasher::Twox64Concat => {
bytes.extend(sp_core_hashing::twox_64(input));
bytes.extend(input);
}
}
}
+22
View File
@@ -7,6 +7,7 @@ use super::{
validate_storage_address,
Storage,
},
utils,
StorageAddress,
};
@@ -57,6 +58,27 @@ where
) -> Result<(), Error> {
validate_storage_address(address, &self.client.metadata())
}
/// Convert some storage address into the raw bytes that would be submitted to the node in order
/// to retrieve the entries at the root of the associated address.
pub fn address_root_bytes<Address: StorageAddress>(
&self,
address: &Address,
) -> Vec<u8> {
utils::storage_address_root_bytes(address)
}
/// Convert some storage address into the raw bytes that would be submitted to the node in order
/// to retrieve an entry. This fails if [`StorageAddress::append_entry_bytes`] does; in the built-in
/// implementation this would be if the pallet and storage entry being asked for is not available on the
/// node you're communicating with, or if the metadata is missing some type information (which should not
/// happen).
pub fn address_bytes<Address: StorageAddress>(
&self,
address: &Address,
) -> Result<Vec<u8>, Error> {
utils::storage_address_bytes(address, &self.client.metadata())
}
}
impl<T, Client> StorageClient<T, Client>
-52
View File
@@ -1,52 +0,0 @@
// Copyright 2019-2022 Parity Technologies (UK) Ltd.
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
// see LICENSE for license details.
use codec::Encode;
// We use this type a bunch, so export it from here.
pub use frame_metadata::StorageHasher;
/// Storage key for a Map.
#[derive(Clone)]
pub struct StorageMapKey {
value: Vec<u8>,
hasher: StorageHasher,
}
impl StorageMapKey {
/// Create a new [`StorageMapKey`] by pre-encoding static data and pairing it with a hasher.
pub fn new<Encodable: Encode>(
value: Encodable,
hasher: StorageHasher,
) -> StorageMapKey {
Self {
value: value.encode(),
hasher,
}
}
/// Convert this [`StorageMapKey`] into bytes and append them to some existing bytes.
pub fn to_bytes(&self, bytes: &mut Vec<u8>) {
hash_bytes(&self.value, &self.hasher, bytes)
}
}
/// Take some SCALE encoded bytes and a [`StorageHasher`] and hash the bytes accordingly.
pub(super) fn hash_bytes(input: &[u8], hasher: &StorageHasher, bytes: &mut Vec<u8>) {
match hasher {
StorageHasher::Identity => bytes.extend(input),
StorageHasher::Blake2_128 => bytes.extend(sp_core_hashing::blake2_128(input)),
StorageHasher::Blake2_128Concat => {
bytes.extend(sp_core_hashing::blake2_128(input));
bytes.extend(input);
}
StorageHasher::Blake2_256 => bytes.extend(sp_core_hashing::blake2_256(input)),
StorageHasher::Twox128 => bytes.extend(sp_core_hashing::twox_128(input)),
StorageHasher::Twox256 => bytes.extend(sp_core_hashing::twox_256(input)),
StorageHasher::Twox64Concat => {
bytes.extend(sp_core_hashing::twox_64(input));
bytes.extend(input);
}
}
}
+39 -45
View File
@@ -7,10 +7,7 @@ use super::storage_address::{
Yes,
};
use crate::{
client::{
OfflineClientT,
OnlineClientT,
},
client::OnlineClientT,
error::Error,
metadata::{
DecodeWithMetadata,
@@ -50,32 +47,16 @@ impl<T: Config, Client> Storage<T, Client> {
}
}
impl<T, Client> Storage<T, Client>
where
T: Config,
Client: OfflineClientT<T>,
{
/// Run the validation logic against some storage address you'd like to access.
///
/// Method has the same meaning as [`StorageClient::validate`](super::storage_client::StorageClient::validate).
pub fn validate<Address: StorageAddress>(
&self,
address: &Address,
) -> Result<(), Error> {
validate_storage_address(address, &self.client.metadata())
}
}
impl<T, Client> Storage<T, Client>
where
T: Config,
Client: OnlineClientT<T>,
{
/// Fetch the raw encoded value at the address/key given.
pub fn fetch_raw<'a>(
pub fn fetch_raw<'address>(
&self,
key: &'a [u8],
) -> impl Future<Output = Result<Option<Vec<u8>>, Error>> + 'a {
key: &'address [u8],
) -> impl Future<Output = Result<Option<Vec<u8>>, Error>> + 'address {
let client = self.client.clone();
let block_hash = self.block_hash;
// Ensure that the returned future doesn't have a lifetime tied to api.storage(),
@@ -116,14 +97,12 @@ where
/// println!("Value: {:?}", value);
/// # }
/// ```
pub fn fetch<'a, Address>(
pub fn fetch<'address, Address>(
&self,
address: &'a Address,
) -> impl Future<
Output = Result<Option<<Address::Target as DecodeWithMetadata>::Target>, Error>,
> + 'a
address: &'address Address,
) -> impl Future<Output = Result<Option<Address::Target>, Error>> + 'address
where
Address: StorageAddress<IsFetchable = Yes> + 'a,
Address: StorageAddress<IsFetchable = Yes> + 'address,
{
let client = self.clone();
async move {
@@ -131,13 +110,13 @@ where
// is likely to actually correspond to a real storage entry or not.
// if not, it means static codegen doesn't line up with runtime
// metadata.
client.validate(address)?;
validate_storage_address(address, &client.client.metadata())?;
// Look up the return type ID to enable DecodeWithMetadata:
let metadata = client.client.metadata();
let lookup_bytes = super::utils::storage_address_bytes(address, &metadata)?;
if let Some(data) = client.fetch_raw(&lookup_bytes).await? {
let val = <Address::Target as DecodeWithMetadata>::decode_storage_with_metadata(
let val = decode_storage_with_metadata::<Address::Target>(
&mut &*data,
address.pallet_name(),
address.entry_name(),
@@ -151,13 +130,12 @@ where
}
/// Fetch a StorageKey that has a default value with an optional block hash.
pub fn fetch_or_default<'a, Address>(
pub fn fetch_or_default<'address, Address>(
&self,
address: &'a Address,
) -> impl Future<Output = Result<<Address::Target as DecodeWithMetadata>::Target, Error>>
+ 'a
address: &'address Address,
) -> impl Future<Output = Result<Address::Target, Error>> + 'address
where
Address: StorageAddress<IsFetchable = Yes, IsDefaultable = Yes> + 'a,
Address: StorageAddress<IsFetchable = Yes, IsDefaultable = Yes> + 'address,
{
let client = self.clone();
async move {
@@ -176,7 +154,7 @@ where
return_type_from_storage_entry_type(&storage_metadata.ty);
let bytes = &mut &storage_metadata.default[..];
let val = <Address::Target as DecodeWithMetadata>::decode_with_metadata(
let val = Address::Target::decode_with_metadata(
bytes,
return_ty_id,
&metadata,
@@ -189,12 +167,12 @@ where
/// Fetch up to `count` keys for a storage map in lexicographic order.
///
/// Supports pagination by passing a value to `start_key`.
pub fn fetch_keys<'a>(
pub fn fetch_keys<'address>(
&self,
key: &'a [u8],
key: &'address [u8],
count: u32,
start_key: Option<&'a [u8]>,
) -> impl Future<Output = Result<Vec<StorageKey>, Error>> + 'a {
start_key: Option<&'address [u8]>,
) -> impl Future<Output = Result<Vec<StorageKey>, Error>> + 'address {
let client = self.client.clone();
let block_hash = self.block_hash;
async move {
@@ -252,7 +230,7 @@ where
// is likely to actually correspond to a real storage entry or not.
// if not, it means static codegen doesn't line up with runtime
// metadata.
client.validate(&address)?;
validate_storage_address(&address, &client.client.metadata())?;
let metadata = client.client.metadata();
@@ -303,9 +281,7 @@ where
ReturnTy: DecodeWithMetadata,
{
/// Returns the next key value pair from a map.
pub async fn next(
&mut self,
) -> Result<Option<(StorageKey, ReturnTy::Target)>, Error> {
pub async fn next(&mut self) -> Result<Option<(StorageKey, ReturnTy)>, Error> {
loop {
if let Some((k, v)) = self.buffer.pop() {
let val = ReturnTy::decode_with_metadata(
@@ -402,3 +378,21 @@ fn return_type_from_storage_entry_type(entry: &StorageEntryType<PortableForm>) -
StorageEntryType::Map { value, .. } => value.id(),
}
}
/// Given some bytes, a pallet and storage name, decode the response.
fn decode_storage_with_metadata<T: DecodeWithMetadata>(
bytes: &mut &[u8],
pallet_name: &str,
storage_entry: &str,
metadata: &Metadata,
) -> Result<T, Error> {
let ty = &metadata.pallet(pallet_name)?.storage(storage_entry)?.ty;
let id = match ty {
StorageEntryType::Plain(ty) => ty.id(),
StorageEntryType::Map { value, .. } => value.id(),
};
let val = T::decode_with_metadata(bytes, id, metadata)?;
Ok(val)
}
+5 -3
View File
@@ -14,7 +14,7 @@ use crate::{
/// Return the root of a given [`StorageAddress`]: hash the pallet name and entry name
/// and append those bytes to the output.
pub fn write_storage_address_root_bytes<Address: StorageAddress>(
pub(crate) fn write_storage_address_root_bytes<Address: StorageAddress>(
addr: &Address,
out: &mut Vec<u8>,
) {
@@ -24,7 +24,7 @@ pub fn write_storage_address_root_bytes<Address: StorageAddress>(
/// Outputs the [`storage_address_root_bytes`] as well as any additional bytes that represent
/// a lookup in a storage map at that location.
pub fn storage_address_bytes<Address: StorageAddress>(
pub(crate) fn storage_address_bytes<Address: StorageAddress>(
addr: &Address,
metadata: &Metadata,
) -> Result<Vec<u8>, Error> {
@@ -35,7 +35,9 @@ pub fn storage_address_bytes<Address: StorageAddress>(
}
/// Outputs a vector containing the bytes written by [`write_storage_address_root_bytes`].
pub fn storage_address_root_bytes<Address: StorageAddress>(addr: &Address) -> Vec<u8> {
pub(crate) fn storage_address_root_bytes<Address: StorageAddress>(
addr: &Address,
) -> Vec<u8> {
let mut bytes = Vec::new();
write_storage_address_root_bytes(addr, &mut bytes);
bytes