Support V16 metadata and refactor metadata code (#1967)

* WIP integrate unstable v16 metadata into Subxt

* first pass moving retain to the CLI tool

* Remove outer enum variant stripping and move now-simpler strip_metadata to new crate. Test it

* tidyup to use stripmetadata package etc

* Fix / comment out tests

* fmt

* clippy

* Fix wasm example

* wasm-example fix

* wasm-example fix

* Make sure to move IDs around after types.retain()

* fmt

* Tweak comment

* Find dispatch error separately to avoid issues during mapping

* Expose associated type information in pallet metadata

* Hopefully fix flaky archive RPC

* remove unwanted temp file

* Address nits

* Add back commented-out tests and address review comments

* use either, and simplify for_each
This commit is contained in:
James Wilson
2025-03-28 15:35:55 +00:00
committed by GitHub
parent 06396f8b1a
commit 72ac18491c
32 changed files with 2355 additions and 2274 deletions
-1
View File
@@ -3,6 +3,5 @@
// see LICENSE for license details.
pub mod ordered_map;
pub mod retain;
pub mod validation;
pub mod variant_index;
-55
View File
@@ -3,7 +3,6 @@
// see LICENSE for license details.
use alloc::vec::Vec;
use core::mem;
use hashbrown::HashMap;
/// A minimal ordered map to let one search for
@@ -44,32 +43,6 @@ where
self.values.is_empty()
}
/// Retain specific entries.
pub fn retain<F>(&mut self, mut f: F)
where
F: FnMut(&V) -> bool,
{
let values = mem::take(&mut self.values);
let map = mem::take(&mut self.map);
// Filter the values, storing a map from old to new positions:
let mut new_values = Vec::new();
let mut old_pos_to_new_pos = HashMap::new();
for (pos, value) in values.into_iter().enumerate().filter(|(_, v)| f(v)) {
old_pos_to_new_pos.insert(pos, new_values.len());
new_values.push(value);
}
// Update the values now we've filtered them:
self.values = new_values;
// Rebuild the map using the new positions:
self.map = map
.into_iter()
.filter_map(|(k, v)| old_pos_to_new_pos.get(&v).map(|v2| (k, *v2)))
.collect();
}
/// Push/insert an item to the end of the map.
pub fn push_insert(&mut self, key: K, value: V) {
let idx = self.values.len();
@@ -95,16 +68,6 @@ where
pub fn values(&self) -> &[V] {
&self.values
}
/// Mutable access to the underlying values.
pub fn values_mut(&mut self) -> &mut [V] {
&mut self.values
}
/// Return the underlying values.
pub fn into_values(self) -> Vec<V> {
self.values
}
}
impl<K, V> FromIterator<(K, V)> for OrderedMap<K, V>
@@ -119,21 +82,3 @@ where
map
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn retain() {
let mut m = OrderedMap::from_iter([(1, 'a'), (2, 'b'), (3, 'c')]);
m.retain(|v| *v != 'b');
assert_eq!(m.get_by_key(&1), Some(&'a'));
assert_eq!(m.get_by_key(&2), None);
assert_eq!(m.get_by_key(&3), Some(&'c'));
assert_eq!(m.values(), &['a', 'c'])
}
}
-444
View File
@@ -1,444 +0,0 @@
// Copyright 2019-2025 Parity Technologies (UK) Ltd.
// This file is dual-licensed as Apache-2.0 or GPL-3.0.
// see LICENSE for license details.
//! Utility functions to generate a subset of the metadata.
use crate::{
ExtrinsicMetadata, Metadata, PalletMetadataInner, RuntimeApiMetadataInner, StorageEntryType,
};
use alloc::collections::BTreeSet;
use alloc::vec::Vec;
use scale_info::{
PortableType, TypeDef, TypeDefArray, TypeDefBitSequence, TypeDefCompact, TypeDefComposite,
TypeDefSequence, TypeDefTuple, TypeDefVariant,
};
#[derive(Clone)]
struct TypeSet {
seen_ids: BTreeSet<u32>,
pub work_set: Vec<u32>,
}
impl TypeSet {
fn new() -> Self {
Self {
seen_ids: BTreeSet::new(),
// Average work set size is around 30-50 elements, depending on the metadata size
work_set: Vec::with_capacity(32),
}
}
fn insert(&mut self, id: u32) -> bool {
self.seen_ids.insert(id)
}
fn contains(&mut self, id: u32) -> bool {
self.seen_ids.contains(&id)
}
fn push_to_workset(&mut self, id: u32) {
// Check if we hit a type we've already inserted; avoid infinite loops and stop.
if self.insert(id) {
self.work_set.push(id);
}
}
/// This function will deeply traverse the initial type and its dependencies to collect the relevant type_ids
fn collect_types(&mut self, metadata: &Metadata, id: u32) {
self.push_to_workset(id);
while let Some(typ) = self.work_set.pop() {
let typ = resolve_typ(metadata, typ);
match &typ.ty.type_def {
TypeDef::Composite(TypeDefComposite { fields }) => {
for field in fields {
self.push_to_workset(field.ty.id);
}
}
TypeDef::Variant(TypeDefVariant { variants }) => {
for variant in variants {
for field in &variant.fields {
self.push_to_workset(field.ty.id);
}
}
}
TypeDef::Array(TypeDefArray { len: _, type_param })
| TypeDef::Sequence(TypeDefSequence { type_param })
| TypeDef::Compact(TypeDefCompact { type_param }) => {
self.push_to_workset(type_param.id);
}
TypeDef::Tuple(TypeDefTuple { fields }) => {
for field in fields {
self.push_to_workset(field.id);
}
}
TypeDef::Primitive(_) => (),
TypeDef::BitSequence(TypeDefBitSequence {
bit_store_type,
bit_order_type,
}) => {
for typ in [bit_order_type, bit_store_type] {
self.push_to_workset(typ.id);
}
}
}
}
}
fn collect_extrinsic_types(&mut self, extrinsic: &ExtrinsicMetadata) {
for ty in [
extrinsic.address_ty,
extrinsic.call_ty,
extrinsic.signature_ty,
extrinsic.extra_ty,
] {
self.insert(ty);
}
for signed in &extrinsic.transaction_extensions {
self.insert(signed.extra_ty);
self.insert(signed.additional_ty);
}
}
/// Collect all type IDs needed to represent the runtime APIs.
fn collect_runtime_api_types(&mut self, metadata: &Metadata, api: &RuntimeApiMetadataInner) {
for method in api.methods.values() {
self.collect_types(metadata, method.output_ty);
}
}
/// Collect all type IDs needed to represent the provided pallet.
fn collect_pallet_types(&mut self, pallet: &PalletMetadataInner, metadata: &Metadata) {
if let Some(storage) = &pallet.storage {
for entry in storage.entries() {
match entry.entry_type {
StorageEntryType::Plain(ty) => {
self.collect_types(metadata, ty);
}
StorageEntryType::Map {
key_ty, value_ty, ..
} => {
self.collect_types(metadata, key_ty);
self.collect_types(metadata, value_ty);
}
}
}
}
for constant in pallet.constants.values() {
self.collect_types(metadata, constant.ty);
}
}
}
fn resolve_typ(metadata: &Metadata, typ: u32) -> &PortableType {
metadata
.types
.types
.get(typ as usize)
.expect("Metadata should contain enum type in registry")
}
/// Generate a subset of the metadata that contains only the
/// types needed to represent the provided pallets and runtime APIs.
///
/// # Note
///
/// Used to strip metadata of unneeded information and to reduce the
/// binary size.
///
/// # Panics
///
/// Panics if the [`scale_info::PortableRegistry`] did not retain all needed types,
/// or the metadata does not contain the "sp_runtime::DispatchError" type.
pub fn retain_metadata<F, G>(
metadata: &mut Metadata,
mut pallets_filter: F,
mut runtime_apis_filter: G,
) where
F: FnMut(&str) -> bool,
G: FnMut(&str) -> bool,
{
// 1. Delete pallets we don't want to keep.
metadata
.pallets
.retain(|pallet| pallets_filter(&pallet.name));
metadata.pallets_by_index = metadata
.pallets
.values()
.iter()
.enumerate()
.map(|(pos, p)| (p.index, pos))
.collect();
// 2. Delete runtime APIs we don't want to keep.
metadata.apis.retain(|api| runtime_apis_filter(&api.name));
// 3. For each outer enum type, strip it if possible, ie if it is not returned by any
// of the things we're keeping (because if it is, we need to keep all of it so that we
// can still decode values into it).
let outer_enums = metadata.outer_enums();
let mut find_type_id = keep_outer_enum(metadata, &mut pallets_filter, &mut runtime_apis_filter);
for outer_enum_ty_id in [
outer_enums.call_enum_ty(),
outer_enums.error_enum_ty(),
outer_enums.event_enum_ty(),
] {
if !find_type_id(outer_enum_ty_id) {
strip_variants_in_enum_type(metadata, &mut pallets_filter, outer_enum_ty_id);
}
}
// 4. Collect all of the type IDs we still want to keep after deleting.
let mut keep_these_type_ids: BTreeSet<u32> =
iterate_metadata_types(metadata).map(|x| *x).collect();
// 5. Additionally, subxt depends on the `DispatchError` type existing; we use the same
// logic here that is used when building our `Metadata` to ensure we keep it too.
let dispatch_error_ty = metadata
.types
.types
.iter()
.find(|ty| ty.ty.path.segments == ["sp_runtime", "DispatchError"])
.expect("Metadata must contain sp_runtime::DispatchError");
keep_these_type_ids.insert(dispatch_error_ty.id);
// 5. Strip all of the type IDs we no longer need, based on the above set.
let map_ids = metadata
.types
.retain(|id| keep_these_type_ids.contains(&id));
// 6. Now, update the type IDs referenced in our metadata to reflect this.
for id in iterate_metadata_types(metadata) {
if let Some(new_id) = map_ids.get(id) {
*id = *new_id;
} else {
panic!("Type id {id} was not retained. This is a bug");
}
}
}
fn strip_variants_in_enum_type<F>(metadata: &mut Metadata, mut pallets_filter: F, id: u32)
where
F: FnMut(&str) -> bool,
{
let ty = {
metadata
.types
.types
.get_mut(id as usize)
.expect("Metadata should contain enum type in registry")
};
let TypeDef::Variant(variant) = &mut ty.ty.type_def else {
panic!("Metadata type is expected to be a variant type");
};
variant.variants.retain(|v| pallets_filter(&v.name));
}
/// Returns an iterator that allows modifying each type ID seen in the metadata (not recursively).
/// This will iterate over every type referenced in the metadata outside of `metadata.types`.
fn iterate_metadata_types(metadata: &mut Metadata) -> impl Iterator<Item = &mut u32> {
let mut types = alloc::vec::Vec::new();
// collect outer_enum top-level types
let outer_enum = &mut metadata.outer_enums;
types.push(&mut outer_enum.call_enum_ty);
types.push(&mut outer_enum.event_enum_ty);
types.push(&mut outer_enum.error_enum_ty);
// collect pallet top-level type ids
for pallet in metadata.pallets.values_mut() {
if let Some(storage) = &mut pallet.storage {
for entry in storage.entries.values_mut() {
match &mut entry.entry_type {
StorageEntryType::Plain(ty) => {
types.push(ty);
}
StorageEntryType::Map {
key_ty, value_ty, ..
} => {
types.push(key_ty);
types.push(value_ty);
}
}
}
};
if let Some(ty) = &mut pallet.call_ty {
types.push(ty);
}
if let Some(ty) = &mut pallet.event_ty {
types.push(ty);
}
if let Some(ty) = &mut pallet.error_ty {
types.push(ty);
}
for constant in pallet.constants.values_mut() {
types.push(&mut constant.ty);
}
}
// collect extrinsic type_ids
for ty in [
&mut metadata.extrinsic.extra_ty,
&mut metadata.extrinsic.address_ty,
&mut metadata.extrinsic.signature_ty,
&mut metadata.extrinsic.call_ty,
] {
types.push(ty);
}
for signed in &mut metadata.extrinsic.transaction_extensions {
types.push(&mut signed.extra_ty);
types.push(&mut signed.additional_ty);
}
types.push(&mut metadata.runtime_ty);
// collect runtime_api_types
for api in metadata.apis.values_mut() {
for method in api.methods.values_mut() {
for input in &mut method.inputs.iter_mut() {
types.push(&mut input.ty);
}
types.push(&mut method.output_ty);
}
}
types.into_iter()
}
/// Look for a type ID anywhere that we can be given back, ie in constants, storage, extrinsics or runtime API return types.
/// This will recurse deeply into those type IDs to find them.
pub fn keep_outer_enum<F, G>(
metadata: &Metadata,
pallets_filter: &mut F,
runtime_apis_filter: &mut G,
) -> impl FnMut(u32) -> bool
where
F: FnMut(&str) -> bool,
G: FnMut(&str) -> bool,
{
let mut type_set = TypeSet::new();
for pallet in metadata.pallets.values() {
if pallets_filter(&pallet.name) {
type_set.collect_pallet_types(pallet, metadata);
}
}
for api in metadata.apis.values() {
if runtime_apis_filter(&api.name) {
type_set.collect_runtime_api_types(metadata, api);
}
}
type_set.collect_extrinsic_types(&metadata.extrinsic);
move |type_id| type_set.contains(type_id)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::Metadata;
use codec::Decode;
use frame_metadata::{RuntimeMetadata, RuntimeMetadataPrefixed};
use std::{fs, path::Path};
fn load_metadata() -> Metadata {
load_metadata_custom("../artifacts/polkadot_metadata_full.scale")
}
fn load_metadata_custom(path: impl AsRef<Path>) -> Metadata {
let bytes = fs::read(path).expect("Cannot read metadata blob");
let meta: RuntimeMetadataPrefixed =
Decode::decode(&mut &*bytes).expect("Cannot decode scale metadata");
match meta.1 {
RuntimeMetadata::V14(v14) => v14.try_into().unwrap(),
RuntimeMetadata::V15(v15) => v15.try_into().unwrap(),
_ => panic!("Unsupported metadata version {:?}", meta.1),
}
}
#[test]
fn retain_one_pallet() {
let metadata_cache = load_metadata();
// Retain one pallet at a time ensuring the test does not panic.
for pallet in metadata_cache.pallets() {
let original_meta = metadata_cache.clone();
let mut metadata = metadata_cache.clone();
retain_metadata(
&mut metadata,
|pallet_name| pallet_name == pallet.name(),
|_| true,
);
assert_eq!(metadata.pallets.len(), 1);
assert_eq!(
&*metadata.pallets.get_by_index(0).unwrap().name,
pallet.name()
);
assert!(
metadata.types.types.len() < original_meta.types.types.len(),
"Stripped metadata must have less retained types than the non-stripped one: stripped amount {}, original amount {}",
metadata.types.types.len(), original_meta.types.types.len()
);
}
}
#[test]
fn retain_one_runtime_api() {
let metadata_cache = load_metadata();
// Retain one runtime API at a time ensuring the test does not panic.
for runtime_api in metadata_cache.runtime_api_traits() {
let mut metadata = metadata_cache.clone();
retain_metadata(
&mut metadata,
|_| true,
|runtime_api_name| runtime_api_name == runtime_api.name(),
);
assert_eq!(metadata.apis.len(), 1);
assert_eq!(
&*metadata.apis.get_by_index(0).unwrap().name,
runtime_api.name()
);
}
}
#[test]
fn issue_1659() {
let full_metadata = load_metadata_custom("../artifacts/regressions/1659.scale");
// Strip metadata to the pallets as described in the issue.
let mut stripped_metadata = full_metadata.clone();
retain_metadata(
&mut stripped_metadata,
{
let set = "Balances,Timestamp,Contracts,ContractsEvm,System"
.split(",")
.collect::<BTreeSet<&str>>();
move |s| set.contains(&s)
},
|_| true,
);
// check that call_enum did not change as it is referenced inside runtime_api
assert_eq!(
stripped_metadata.type_hash(stripped_metadata.outer_enums.call_enum_ty),
full_metadata.type_hash(full_metadata.outer_enums.call_enum_ty)
);
// check that event_num did not change as it is referenced inside runtime_api
assert_eq!(
stripped_metadata.type_hash(stripped_metadata.outer_enums.event_enum_ty),
full_metadata.type_hash(full_metadata.outer_enums.event_enum_ty)
);
}
}
+141 -198
View File
@@ -6,15 +6,13 @@
use crate::{
CustomMetadata, CustomValueMetadata, ExtrinsicMetadata, Metadata, PalletMetadata,
RuntimeApiMetadata, RuntimeApiMethodMetadata, StorageEntryMetadata, StorageEntryType,
PalletViewFunctionMetadata, RuntimeApiMetadata, RuntimeApiMethodMetadata, StorageEntryMetadata,
StorageEntryType,
};
use alloc::vec::Vec;
use hashbrown::HashMap;
use outer_enum_hashes::OuterEnumHashes;
use scale_info::{form::PortableForm, Field, PortableRegistry, TypeDef, TypeDefVariant, Variant};
pub mod outer_enum_hashes;
// The number of bytes our `hash` function produces.
pub(crate) const HASH_LEN: usize = 32;
pub type Hash = [u8; HASH_LEN];
@@ -79,7 +77,6 @@ fn get_field_hash(
registry: &PortableRegistry,
field: &Field<PortableForm>,
cache: &mut HashMap<u32, CachedHash>,
outer_enum_hashes: &OuterEnumHashes,
) -> Hash {
let field_name_bytes = match &field.name {
Some(name) => hash(name.as_bytes()),
@@ -88,7 +85,7 @@ fn get_field_hash(
concat_and_hash2(
&field_name_bytes,
&get_type_hash_recurse(registry, field.ty.id, cache, outer_enum_hashes),
&get_type_hash_recurse(registry, field.ty.id, cache),
)
}
@@ -97,16 +94,12 @@ fn get_variant_hash(
registry: &PortableRegistry,
var: &Variant<PortableForm>,
cache: &mut HashMap<u32, CachedHash>,
outer_enum_hashes: &OuterEnumHashes,
) -> Hash {
let variant_name_bytes = hash(var.name.as_bytes());
let variant_field_bytes = var.fields.iter().fold([0u8; HASH_LEN], |bytes, field| {
// EncodeAsType and DecodeAsType don't care about variant field ordering,
// so XOR the fields to ensure that it doesn't matter.
xor(
bytes,
get_field_hash(registry, field, cache, outer_enum_hashes),
)
xor(bytes, get_field_hash(registry, field, cache))
});
concat_and_hash2(&variant_name_bytes, &variant_field_bytes)
@@ -117,7 +110,6 @@ fn get_type_def_variant_hash(
variant: &TypeDefVariant<PortableForm>,
only_these_variants: Option<&[&str]>,
cache: &mut HashMap<u32, CachedHash>,
outer_enum_hashes: &OuterEnumHashes,
) -> Hash {
let variant_id_bytes = [TypeBeingHashed::Variant as u8; HASH_LEN];
let variant_field_bytes = variant.variants.iter().fold([0u8; HASH_LEN], |bytes, var| {
@@ -128,10 +120,7 @@ fn get_type_def_variant_hash(
.map(|only_these_variants| only_these_variants.contains(&var.name.as_str()))
.unwrap_or(true);
if should_hash {
xor(
bytes,
get_variant_hash(registry, var, cache, outer_enum_hashes),
)
xor(bytes, get_variant_hash(registry, var, cache))
} else {
bytes
}
@@ -144,7 +133,6 @@ fn get_type_def_hash(
registry: &PortableRegistry,
ty_def: &TypeDef<PortableForm>,
cache: &mut HashMap<u32, CachedHash>,
outer_enum_hashes: &OuterEnumHashes,
) -> Hash {
match ty_def {
TypeDef::Composite(composite) => {
@@ -156,19 +144,14 @@ fn get_type_def_hash(
.fold([0u8; HASH_LEN], |bytes, field| {
// With EncodeAsType and DecodeAsType we no longer care which order the fields are in,
// as long as all of the names+types are there. XOR to not care about ordering.
xor(
bytes,
get_field_hash(registry, field, cache, outer_enum_hashes),
)
xor(bytes, get_field_hash(registry, field, cache))
});
concat_and_hash2(&composite_id_bytes, &composite_field_bytes)
}
TypeDef::Variant(variant) => {
get_type_def_variant_hash(registry, variant, None, cache, outer_enum_hashes)
}
TypeDef::Variant(variant) => get_type_def_variant_hash(registry, variant, None, cache),
TypeDef::Sequence(sequence) => concat_and_hash2(
&[TypeBeingHashed::Sequence as u8; HASH_LEN],
&get_type_hash_recurse(registry, sequence.type_param.id, cache, outer_enum_hashes),
&get_type_hash_recurse(registry, sequence.type_param.id, cache),
),
TypeDef::Array(array) => {
// Take length into account too; different length must lead to different hash.
@@ -180,16 +163,13 @@ fn get_type_def_hash(
};
concat_and_hash2(
&array_id_bytes,
&get_type_hash_recurse(registry, array.type_param.id, cache, outer_enum_hashes),
&get_type_hash_recurse(registry, array.type_param.id, cache),
)
}
TypeDef::Tuple(tuple) => {
let mut bytes = hash(&[TypeBeingHashed::Tuple as u8]);
for field in &tuple.fields {
bytes = concat_and_hash2(
&bytes,
&get_type_hash_recurse(registry, field.id, cache, outer_enum_hashes),
);
bytes = concat_and_hash2(&bytes, &get_type_hash_recurse(registry, field.id, cache));
}
bytes
}
@@ -199,12 +179,12 @@ fn get_type_def_hash(
}
TypeDef::Compact(compact) => concat_and_hash2(
&[TypeBeingHashed::Compact as u8; HASH_LEN],
&get_type_hash_recurse(registry, compact.type_param.id, cache, outer_enum_hashes),
&get_type_hash_recurse(registry, compact.type_param.id, cache),
),
TypeDef::BitSequence(bitseq) => concat_and_hash3(
&[TypeBeingHashed::BitSequence as u8; HASH_LEN],
&get_type_hash_recurse(registry, bitseq.bit_order_type.id, cache, outer_enum_hashes),
&get_type_hash_recurse(registry, bitseq.bit_store_type.id, cache, outer_enum_hashes),
&get_type_hash_recurse(registry, bitseq.bit_order_type.id, cache),
&get_type_hash_recurse(registry, bitseq.bit_store_type.id, cache),
),
}
}
@@ -235,12 +215,8 @@ impl CachedHash {
///
/// The reason for this unintuitive behavior is that we sometimes want to trim the outer enum types
/// beforehand to only include certain pallets, which affects their hash values.
pub fn get_type_hash(
registry: &PortableRegistry,
id: u32,
outer_enum_hashes: &OuterEnumHashes,
) -> Hash {
get_type_hash_recurse(registry, id, &mut HashMap::new(), outer_enum_hashes)
pub fn get_type_hash(registry: &PortableRegistry, id: u32) -> Hash {
get_type_hash_recurse(registry, id, &mut HashMap::new())
}
/// Obtain the hash representation of a `scale_info::Type` identified by id.
@@ -248,13 +224,7 @@ fn get_type_hash_recurse(
registry: &PortableRegistry,
id: u32,
cache: &mut HashMap<u32, CachedHash>,
outer_enum_hashes: &OuterEnumHashes,
) -> Hash {
// If the type is part of precomputed outer enum hashes, the respective hash is used instead:
if let Some(hash) = outer_enum_hashes.resolve(id) {
return hash;
}
// Guard against recursive types, with a 2 step caching approach:
// if the cache has an entry for the id, just return a hash derived from it.
// if the type has not been seen yet, mark it with `CachedHash::Recursive` in the cache and proceed to `get_type_def_hash()`.
@@ -275,22 +245,17 @@ fn get_type_hash_recurse(
let ty = registry
.resolve(id)
.expect("Type ID provided by the metadata is registered; qed");
let type_hash = get_type_def_hash(registry, &ty.type_def, cache, outer_enum_hashes);
let type_hash = get_type_def_hash(registry, &ty.type_def, cache);
cache.insert(id, CachedHash::Hash(type_hash));
type_hash
}
/// Obtain the hash representation of a `frame_metadata::v15::ExtrinsicMetadata`.
fn get_extrinsic_hash(
registry: &PortableRegistry,
extrinsic: &ExtrinsicMetadata,
outer_enum_hashes: &OuterEnumHashes,
) -> Hash {
fn get_extrinsic_hash(registry: &PortableRegistry, extrinsic: &ExtrinsicMetadata) -> Hash {
// Get the hashes of the extrinsic type.
let address_hash = get_type_hash(registry, extrinsic.address_ty, outer_enum_hashes);
let address_hash = get_type_hash(registry, extrinsic.address_ty);
// The `RuntimeCall` type is intentionally omitted and hashed by the outer enums instead.
let signature_hash = get_type_hash(registry, extrinsic.signature_ty, outer_enum_hashes);
let extra_hash = get_type_hash(registry, extrinsic.extra_ty, outer_enum_hashes);
let signature_hash = get_type_hash(registry, extrinsic.signature_ty);
// Supported versions are just u8s and we will likely never have more than 32 of these, so put them into
// an array of u8s and panic if more than 32.
@@ -303,10 +268,9 @@ fn get_extrinsic_hash(
a
};
let mut bytes = concat_and_hash4(
let mut bytes = concat_and_hash3(
&address_hash,
&signature_hash,
&extra_hash,
&supported_extrinsic_versions,
);
@@ -314,8 +278,8 @@ fn get_extrinsic_hash(
bytes = concat_and_hash4(
&bytes,
&hash(signed_extension.identifier.as_bytes()),
&get_type_hash(registry, signed_extension.extra_ty, outer_enum_hashes),
&get_type_hash(registry, signed_extension.additional_ty, outer_enum_hashes),
&get_type_hash(registry, signed_extension.extra_ty),
&get_type_hash(registry, signed_extension.additional_ty),
)
}
@@ -323,11 +287,7 @@ fn get_extrinsic_hash(
}
/// Get the hash corresponding to a single storage entry.
fn get_storage_entry_hash(
registry: &PortableRegistry,
entry: &StorageEntryMetadata,
outer_enum_hashes: &OuterEnumHashes,
) -> Hash {
fn get_storage_entry_hash(registry: &PortableRegistry, entry: &StorageEntryMetadata) -> Hash {
let mut bytes = concat_and_hash3(
&hash(entry.name.as_bytes()),
// Cloning 'entry.modifier' should essentially be a copy.
@@ -336,9 +296,7 @@ fn get_storage_entry_hash(
);
match &entry.entry_type {
StorageEntryType::Plain(ty) => {
concat_and_hash2(&bytes, &get_type_hash(registry, *ty, outer_enum_hashes))
}
StorageEntryType::Plain(ty) => concat_and_hash2(&bytes, &get_type_hash(registry, *ty)),
StorageEntryType::Map {
hashers,
key_ty,
@@ -350,83 +308,18 @@ fn get_storage_entry_hash(
}
concat_and_hash3(
&bytes,
&get_type_hash(registry, *key_ty, outer_enum_hashes),
&get_type_hash(registry, *value_ty, outer_enum_hashes),
&get_type_hash(registry, *key_ty),
&get_type_hash(registry, *value_ty),
)
}
}
}
/// Get the hash corresponding to a single runtime API method.
fn get_runtime_method_hash(
registry: &PortableRegistry,
trait_name: &str,
method_metadata: &RuntimeApiMethodMetadata,
outer_enum_hashes: &OuterEnumHashes,
) -> Hash {
// The trait name is part of the runtime API call that is being
// generated for this method. Therefore the trait name is strongly
// connected to the method in the same way as a parameter is
// to the method.
let mut bytes = concat_and_hash2(
&hash(trait_name.as_bytes()),
&hash(method_metadata.name.as_bytes()),
);
for input in &method_metadata.inputs {
bytes = concat_and_hash3(
&bytes,
&hash(input.name.as_bytes()),
&get_type_hash(registry, input.ty, outer_enum_hashes),
);
}
bytes = concat_and_hash2(
&bytes,
&get_type_hash(registry, method_metadata.output_ty, outer_enum_hashes),
);
bytes
}
/// Obtain the hash of all of a runtime API trait, including all of its methods.
pub fn get_runtime_trait_hash(
trait_metadata: RuntimeApiMetadata,
outer_enum_hashes: &OuterEnumHashes,
) -> Hash {
let trait_name = &*trait_metadata.inner.name;
let method_bytes = trait_metadata
.methods()
.fold([0u8; HASH_LEN], |bytes, method_metadata| {
// We don't care what order the trait methods exist in, and want the hash to
// be identical regardless. For this, we can just XOR the hashes for each method
// together; we'll get the same output whichever order they are XOR'd together in,
// so long as each individual method is the same.
xor(
bytes,
get_runtime_method_hash(
trait_metadata.types,
trait_name,
method_metadata,
outer_enum_hashes,
),
)
});
concat_and_hash2(&hash(trait_name.as_bytes()), &method_bytes)
}
fn get_custom_metadata_hash(
custom_metadata: &CustomMetadata,
outer_enum_hashes: &OuterEnumHashes,
) -> Hash {
fn get_custom_metadata_hash(custom_metadata: &CustomMetadata) -> Hash {
custom_metadata
.iter()
.fold([0u8; HASH_LEN], |bytes, custom_value| {
xor(
bytes,
get_custom_value_hash(&custom_value, outer_enum_hashes),
)
xor(bytes, get_custom_value_hash(&custom_value))
})
}
@@ -434,21 +327,14 @@ fn get_custom_metadata_hash(
///
/// If the `custom_value` has a type id that is not present in the metadata,
/// only the name and bytes are used for hashing.
pub fn get_custom_value_hash(
custom_value: &CustomValueMetadata,
outer_enum_hashes: &OuterEnumHashes,
) -> Hash {
pub fn get_custom_value_hash(custom_value: &CustomValueMetadata) -> Hash {
let name_hash = hash(custom_value.name.as_bytes());
if custom_value.types.resolve(custom_value.type_id()).is_none() {
hash(&name_hash)
} else {
concat_and_hash2(
&name_hash,
&get_type_hash(
custom_value.types,
custom_value.type_id(),
outer_enum_hashes,
),
&get_type_hash(custom_value.types, custom_value.type_id()),
)
}
}
@@ -457,7 +343,7 @@ pub fn get_custom_value_hash(
pub fn get_storage_hash(pallet: &PalletMetadata, entry_name: &str) -> Option<Hash> {
let storage = pallet.storage()?;
let entry = storage.entry_by_name(entry_name)?;
let hash = get_storage_entry_hash(pallet.types, entry, &OuterEnumHashes::empty());
let hash = get_storage_entry_hash(pallet.types, entry);
Some(hash)
}
@@ -466,7 +352,7 @@ pub fn get_constant_hash(pallet: &PalletMetadata, constant_name: &str) -> Option
let constant = pallet.constant_by_name(constant_name)?;
// We only need to check that the type of the constant asked for matches.
let bytes = get_type_hash(pallet.types, constant.ty, &OuterEnumHashes::empty());
let bytes = get_type_hash(pallet.types, constant.ty);
Some(bytes)
}
@@ -475,42 +361,102 @@ pub fn get_call_hash(pallet: &PalletMetadata, call_name: &str) -> Option<Hash> {
let call_variant = pallet.call_variant_by_name(call_name)?;
// hash the specific variant representing the call we are interested in.
let hash = get_variant_hash(
pallet.types,
call_variant,
&mut HashMap::new(),
&OuterEnumHashes::empty(),
);
let hash = get_variant_hash(pallet.types, call_variant, &mut HashMap::new());
Some(hash)
}
/// Obtain the hash of a specific runtime API function, or an error if it's not found.
pub fn get_runtime_api_hash(runtime_apis: &RuntimeApiMetadata, method_name: &str) -> Option<Hash> {
let trait_name = &*runtime_apis.inner.name;
let method_metadata = runtime_apis.method_by_name(method_name)?;
/// Obtain the hash of a specific runtime API method, or an error if it's not found.
pub fn get_runtime_api_hash(runtime_api: &RuntimeApiMethodMetadata) -> Hash {
let registry = runtime_api.types;
Some(get_runtime_method_hash(
runtime_apis.types,
trait_name,
method_metadata,
&OuterEnumHashes::empty(),
))
// The trait name is part of the runtime API call that is being
// generated for this method. Therefore the trait name is strongly
// connected to the method in the same way as a parameter is
// to the method.
let mut bytes = concat_and_hash2(
&hash(runtime_api.trait_name.as_bytes()),
&hash(runtime_api.name().as_bytes()),
);
for input in runtime_api.inputs() {
bytes = concat_and_hash3(
&bytes,
&hash(input.name.as_bytes()),
&get_type_hash(registry, input.ty),
);
}
bytes = concat_and_hash2(&bytes, &get_type_hash(registry, runtime_api.output_ty()));
bytes
}
/// Obtain the hash of all of a runtime API trait, including all of its methods.
pub fn get_runtime_apis_hash(trait_metadata: RuntimeApiMetadata) -> Hash {
// Each API is already hashed considering the trait name, so we don't need
// to consider the trait name again here.
trait_metadata
.methods()
.fold([0u8; HASH_LEN], |bytes, method_metadata| {
// We don't care what order the trait methods exist in, and want the hash to
// be identical regardless. For this, we can just XOR the hashes for each method
// together; we'll get the same output whichever order they are XOR'd together in,
// so long as each individual method is the same.
xor(bytes, get_runtime_api_hash(&method_metadata))
})
}
/// Obtain the hash of a specific pallet view function, or an error if it's not found.
pub fn get_pallet_view_function_hash(view_function: &PalletViewFunctionMetadata) -> Hash {
let registry = view_function.types;
// The Query ID is `twox_128(pallet_name) ++ twox_128("fn_name(fnarg_types) -> return_ty")`.
let mut bytes = view_function.query_id();
// This only takes type _names_ into account, so we beef this up by combining with actual
// type hashes, in a similar approach to runtime APIs..
for input in view_function.inputs() {
bytes = concat_and_hash3(
&bytes,
&hash(input.name.as_bytes()),
&get_type_hash(registry, input.ty),
);
}
bytes = concat_and_hash2(&bytes, &get_type_hash(registry, view_function.output_ty()));
bytes
}
/// Obtain the hash of all of the view functions in a pallet, including all of its methods.
fn get_pallet_view_functions_hash(pallet_metadata: &PalletMetadata) -> Hash {
// Each API is already hashed considering the trait name, so we don't need
// to consider the trait name again here.
pallet_metadata
.view_functions()
.fold([0u8; HASH_LEN], |bytes, method_metadata| {
// We don't care what order the view functions are declared in, and want the hash to
// be identical regardless. For this, we can just XOR the hashes for each method
// together; we'll get the same output whichever order they are XOR'd together in,
// so long as each individual method is the same.
xor(bytes, get_pallet_view_function_hash(&method_metadata))
})
}
/// Obtain the hash representation of a `frame_metadata::v15::PalletMetadata`.
// NOTE(review): this span is unresolved unified-diff residue. Both the old
// signature (taking `outer_enum_hashes`) and the new one appear on consecutive
// lines, several match arms are duplicated in old/new form, and `@@` hunk
// headers below mark elided lines (e.g. the opening of the storage section is
// missing). It cannot compile as-is and needs manual reconstruction from the
// commit it was copied from.
pub fn get_pallet_hash(pallet: PalletMetadata, outer_enum_hashes: &OuterEnumHashes) -> Hash {
pub fn get_pallet_hash(pallet: PalletMetadata) -> Hash {
let registry = pallet.types;
// Hash the call/event/error enum types if present; a zeroed hash stands in
// for a pallet that has none.
let call_bytes = match pallet.call_ty_id() {
Some(calls) => get_type_hash(registry, calls, outer_enum_hashes),
Some(calls) => get_type_hash(registry, calls),
None => [0u8; HASH_LEN],
};
let event_bytes = match pallet.event_ty_id() {
Some(event) => get_type_hash(registry, event, outer_enum_hashes),
Some(event) => get_type_hash(registry, event),
None => [0u8; HASH_LEN],
};
let error_bytes = match pallet.error_ty_id() {
Some(error) => get_type_hash(registry, error, outer_enum_hashes),
Some(error) => get_type_hash(registry, error),
None => [0u8; HASH_LEN],
};
// Constants are XOR'd together so their order is irrelevant.
let constant_bytes = pallet.constants().fold([0u8; HASH_LEN], |bytes, constant| {
@@ -518,7 +464,7 @@ pub fn get_pallet_hash(pallet: PalletMetadata, outer_enum_hashes: &OuterEnumHash
// of (constantName, constantType) to make the order we see them irrelevant.
let constant_hash = concat_and_hash2(
&hash(constant.name.as_bytes()),
&get_type_hash(registry, constant.ty(), outer_enum_hashes),
&get_type_hash(registry, constant.ty()),
);
xor(bytes, constant_hash)
});
// NOTE(review): the hunk header below elides the start of the storage match
// (`prefix_hash` is referenced later but never bound in the visible lines).
@@ -531,23 +477,22 @@ pub fn get_pallet_hash(pallet: PalletMetadata, outer_enum_hashes: &OuterEnumHash
.fold([0u8; HASH_LEN], |bytes, entry| {
// We don't care what order the storage entries occur in, so XOR them together
// to make the order irrelevant.
xor(
bytes,
get_storage_entry_hash(registry, entry, outer_enum_hashes),
)
xor(bytes, get_storage_entry_hash(registry, entry))
});
concat_and_hash2(&prefix_hash, &entries_hash)
}
None => [0u8; HASH_LEN],
};
let view_functions_bytes = get_pallet_view_functions_hash(&pallet);
// Hash all of the above together:
concat_and_hash5(
concat_and_hash6(
&call_bytes,
&event_bytes,
&error_bytes,
&constant_bytes,
&storage_bytes,
&view_functions_bytes,
)
}
@@ -597,14 +542,6 @@ impl<'a> MetadataHasher<'a> {
// NOTE(review): this method body is unresolved unified-diff residue: old lines
// (using `outer_enum_hashes`) and their replacements are interleaved, and the
// embedded `@@` hunk headers mark elided lines (e.g. the computation of
// `should_hash` and of `apis_hash` is cut off). It cannot compile as-is and
// needs manual reconstruction before review of the logic is possible.
pub fn hash(&self) -> Hash {
let metadata = self.metadata;
// Get the hashes of outer enums, considering only `specific_pallets` (if any are set).
// If any of the types that represent outer enums are encountered later, hashes from `top_level_enum_hashes` can be substituted.
let outer_enum_hashes = OuterEnumHashes::new(
metadata,
self.specific_pallets.as_deref(),
self.specific_runtime_apis.as_deref(),
);
let pallet_hash = metadata.pallets().fold([0u8; HASH_LEN], |bytes, pallet| {
// If specific pallets are given, only include this pallet if it is in the specific pallets.
let should_hash = self
@@ -615,7 +552,7 @@ impl<'a> MetadataHasher<'a> {
// We don't care what order the pallets are seen in, so XOR their
// hashes together to be order independent.
if should_hash {
xor(bytes, get_pallet_hash(pallet, &outer_enum_hashes))
xor(bytes, get_pallet_hash(pallet))
} else {
bytes
}
@@ -633,27 +570,30 @@ impl<'a> MetadataHasher<'a> {
// We don't care what order the runtime APIs are seen in, so XOR their
// hashes together to be order independent.
if should_hash {
xor(bytes, get_runtime_trait_hash(api, &outer_enum_hashes))
xor(bytes, get_runtime_apis_hash(api))
} else {
bytes
}
});
let extrinsic_hash =
get_extrinsic_hash(&metadata.types, &metadata.extrinsic, &outer_enum_hashes);
let runtime_hash =
get_type_hash(&metadata.types, metadata.runtime_ty(), &outer_enum_hashes);
let outer_enums_hash = concat_and_hash3(
&get_type_hash(&metadata.types, metadata.outer_enums.call_enum_ty),
&get_type_hash(&metadata.types, metadata.outer_enums.event_enum_ty),
&get_type_hash(&metadata.types, metadata.outer_enums.error_enum_ty),
);
let extrinsic_hash = get_extrinsic_hash(&metadata.types, &metadata.extrinsic);
let custom_values_hash = self
.include_custom_values
.then(|| get_custom_metadata_hash(&metadata.custom(), &outer_enum_hashes))
.then(|| get_custom_metadata_hash(&metadata.custom()))
.unwrap_or_default();
// Fold everything into a single metadata hash.
concat_and_hash6(
concat_and_hash5(
&pallet_hash,
&apis_hash,
&outer_enums_hash,
&extrinsic_hash,
&runtime_hash,
&outer_enum_hashes.combined_hash(),
&custom_values_hash,
)
}
@@ -886,11 +826,10 @@ mod tests {
let registry: PortableRegistry = registry.into();
let mut cache = HashMap::new();
let ignored_enums = &OuterEnumHashes::empty();
let a_hash = get_type_hash_recurse(&registry, a_type_id, &mut cache, ignored_enums);
let a_hash2 = get_type_hash_recurse(&registry, a_type_id, &mut cache, ignored_enums);
let b_hash = get_type_hash_recurse(&registry, b_type_id, &mut cache, ignored_enums);
let a_hash = get_type_hash_recurse(&registry, a_type_id, &mut cache);
let a_hash2 = get_type_hash_recurse(&registry, a_type_id, &mut cache);
let b_hash = get_type_hash_recurse(&registry, b_type_id, &mut cache);
let CachedHash::Hash(a_cache_hash) = cache[&a_type_id] else {
panic!()
@@ -1123,7 +1062,7 @@ mod tests {
PalletEventMetadata, PalletStorageMetadata, StorageEntryMetadata, StorageEntryModifier,
};
// NOTE(review): unified-diff residue — both the old (`-> Metadata`) and new
// (`-> v15::RuntimeMetadataV15`) signatures are present, and the hunk header
// below elides most of the function body (the event type definitions and the
// metadata construction). Needs manual reconstruction; cannot compile as-is.
fn metadata_with_pallet_events() -> Metadata {
fn metadata_with_pallet_events() -> v15::RuntimeMetadataV15 {
#[allow(dead_code)]
#[derive(scale_info::TypeInfo)]
struct FirstEvent {
@@ -1236,20 +1175,24 @@ mod tests {
map: Default::default(),
},
)
.try_into()
.expect("can build valid metadata")
}
#[test]
// NOTE(review): unified-diff residue — both the removed call (`m.retain`) and
// its replacement (`m.strip_metadata`) appear, and the function is truncated:
// the chained `MetadataHasher` expression and the closing assertions/brace are
// cut off by the following deleted-file hunk. Needs manual reconstruction.
fn hash_comparison_trimmed_metadata() {
use subxt_utils_stripmetadata::StripMetadata;
// trim the metadata:
let metadata = metadata_with_pallet_events();
let trimmed_metadata = {
let mut m = metadata.clone();
m.retain(|e| e == "First", |_| true);
m.strip_metadata(|e| e == "First", |_| true);
m
};
// Now convert it into our inner repr:
let metadata = Metadata::try_from(metadata).unwrap();
let trimmed_metadata = Metadata::try_from(trimmed_metadata).unwrap();
// test that the hashes are the same:
let hash = MetadataHasher::new(&metadata)
.only_these_pallets(&["First"])
@@ -1,114 +0,0 @@
//! Hash representations of the `frame_metadata::v15::OuterEnums`.
use hashbrown::HashMap;
use scale_info::{PortableRegistry, TypeDef};
use crate::{
utils::{
retain,
validation::{get_type_def_variant_hash, get_type_hash},
},
Metadata,
};
use super::{concat_and_hash3, Hash, HASH_LEN};
/// Hash representations of the `frame_metadata::v15::OuterEnums`.
pub struct OuterEnumHashes {
// Each field pairs the type id of one outer enum with the hash computed for
// it; `new` stores `(enums.*_enum_ty, hash)` and `resolve` matches on the id.
call_hash: (u32, Hash),
error_hash: (u32, Hash),
event_hash: (u32, Hash),
}
impl OuterEnumHashes {
    /// Build `OuterEnumHashes` from the given metadata. If `specific_pallets` /
    /// `specific_runtimes` are set, the outer enums are stripped down to only the
    /// variants those filters keep before their hashes are calculated.
    pub fn new(
        metadata: &Metadata,
        specific_pallets: Option<&[&str]>,
        specific_runtimes: Option<&[&str]>,
    ) -> Self {
        // `None` means "no filter was given", which keeps every name.
        let name_is_kept = |names: Option<&[&str]>, name: &str| {
            names.map_or(true, |names| names.contains(&name))
        };

        let mut check_enum_type_id = retain::keep_outer_enum(
            metadata,
            &mut |name| name_is_kept(specific_pallets, name),
            &mut |name| name_is_kept(specific_runtimes, name),
        );

        // Map the result of `check_enum_type_id` to an optional variant filter:
        // `true` => no filtering, `false` => restrict to `specific_pallets`.
        // (Presumably `true` means the enum is kept wholesale — confirm against
        // `retain::keep_outer_enum`.)
        let variant_filter = |keep_unfiltered: bool| {
            if keep_unfiltered {
                None
            } else {
                specific_pallets
            }
        };

        // Hash one outer enum type, optionally restricted to a subset of variants.
        fn enum_hash(
            registry: &PortableRegistry,
            id: u32,
            only_these_variants: Option<&[&str]>,
        ) -> Hash {
            let ty = registry
                .types
                .get(id as usize)
                .expect("Metadata should contain enum type in registry");
            match &ty.ty.type_def {
                TypeDef::Variant(variant) => get_type_def_variant_hash(
                    registry,
                    variant,
                    only_these_variants,
                    &mut HashMap::new(),
                    // The outer enum hashes are what we're computing right now,
                    // so pass an empty placeholder that resolves nothing.
                    &OuterEnumHashes::empty(),
                ),
                _ => get_type_hash(registry, id, &OuterEnumHashes::empty()),
            }
        }

        let enums = &metadata.outer_enums;

        let call_variants = variant_filter(check_enum_type_id(enums.call_enum_ty));
        let call_hash = enum_hash(metadata.types(), enums.call_enum_ty, call_variants);
        let event_variants = variant_filter(check_enum_type_id(enums.event_enum_ty));
        let event_hash = enum_hash(metadata.types(), enums.event_enum_ty, event_variants);
        let error_variants = variant_filter(check_enum_type_id(enums.error_enum_ty));
        let error_hash = enum_hash(metadata.types(), enums.error_enum_ty, error_variants);

        Self {
            call_hash: (enums.call_enum_ty, call_hash),
            error_hash: (enums.error_enum_ty, error_hash),
            event_hash: (enums.event_enum_ty, event_hash),
        }
    }

    /// Constructs empty `OuterEnumHashes` whose type ids (`u32::MAX`) are never a
    /// real type id. Usable as a placeholder when outer enum hashes are required
    /// but should be ignored.
    pub fn empty() -> Self {
        let unused = (u32::MAX, [0; HASH_LEN]);
        Self {
            call_hash: unused,
            error_hash: unused,
            event_hash: unused,
        }
    }

    /// Returns a combined hash of the top level enums.
    pub fn combined_hash(&self) -> Hash {
        concat_and_hash3(&self.call_hash.1, &self.error_hash.1, &self.event_hash.1)
    }

    /// If `id` is one of the 3 top level enum types, returns `Some(hash)` with its
    /// pre-computed hash; otherwise `None`.
    ///
    /// This matters because top level enums are sometimes stripped down to only
    /// certain pallets, and this struct holds the hashes of those stripped types.
    pub fn resolve(&self, id: u32) -> Option<[u8; HASH_LEN]> {
        if id == self.error_hash.0 {
            Some(self.error_hash.1)
        } else if id == self.event_hash.0 {
            Some(self.event_hash.1)
        } else if id == self.call_hash.0 {
            Some(self.call_hash.1)
        } else {
            None
        }
    }
}