Strip metadata fix (#1774)

* Fix metadata stripping being too eager

closes #1659

---------

Co-authored-by: James Wilson <james@jsdw.me>
Co-authored-by: Niklas Adolfsson <niklasadolfsson1@gmail.com>
This commit is contained in:
Pavlo Khrystenko
2024-10-21 22:41:14 +02:00
committed by GitHub
parent 9527165a40
commit ae0fce8dff
7 changed files with 350 additions and 233 deletions
Generated
-1
View File
@@ -10712,7 +10712,6 @@ dependencies = [
name = "subxt-metadata"
version = "0.37.0"
dependencies = [
"assert_matches",
"bitvec",
"criterion",
"frame-decode",
Binary file not shown.
-1
View File
@@ -29,7 +29,6 @@ hashbrown = { workspace = true }
bitvec = { workspace = true, features = ["alloc"] }
criterion = { workspace = true }
scale-info = { workspace = true, features = ["bit-vec"] }
assert_matches = { workspace = true }
[lib]
# Without this, libtest cli opts interfere with criterion benches:
+4 -1
View File
@@ -214,7 +214,10 @@ impl Metadata {
MetadataHasher::new(self)
}
/// Filter out any pallets that we don't want to keep, retaining only those that we do.
/// Filter out any pallets and/or runtime_apis that we don't want to keep, retaining only those that we do.
/// Note:
/// filtering by `pallet`s alone will not lead to a significant metadata size reduction, because the return types are kept to ensure that those can still be decoded.
///
pub fn retain<F, G>(&mut self, pallet_filter: F, api_filter: G)
where
F: FnMut(&str) -> bool,
+309 -224
View File
@@ -5,187 +5,139 @@
//! Utility functions to generate a subset of the metadata.
use crate::{
ExtrinsicMetadata, Metadata, OuterEnumsMetadata, PalletMetadataInner, RuntimeApiMetadataInner,
StorageEntryType,
ExtrinsicMetadata, Metadata, PalletMetadataInner, RuntimeApiMetadataInner, StorageEntryType,
};
use alloc::collections::BTreeSet;
use alloc::vec::Vec;
use scale_info::{
PortableType, TypeDef, TypeDefArray, TypeDefBitSequence, TypeDefCompact, TypeDefComposite,
TypeDefSequence, TypeDefTuple, TypeDefVariant,
};
use alloc::collections::BTreeMap;
use hashbrown::HashSet;
use scale_info::TypeDef;
/// Collect all type IDs needed to represent the provided pallet.
fn collect_pallet_types(pallet: &PalletMetadataInner, type_ids: &mut HashSet<u32>) {
if let Some(storage) = &pallet.storage {
for entry in storage.entries() {
match entry.entry_type {
StorageEntryType::Plain(ty) => {
type_ids.insert(ty);
#[derive(Clone)]
struct TypeSet {
seen_ids: BTreeSet<u32>,
pub work_set: Vec<u32>,
}
impl TypeSet {
fn new() -> Self {
Self {
seen_ids: BTreeSet::new(),
// Average work set size is around 30-50 elements, depending on the metadata size
work_set: Vec::with_capacity(32),
}
}
fn insert(&mut self, id: u32) -> bool {
self.seen_ids.insert(id)
}
fn contains(&mut self, id: u32) -> bool {
self.seen_ids.contains(&id)
}
fn push_to_workset(&mut self, id: u32) {
// Check if we hit a type we've already inserted; avoid infinite loops and stop.
if self.insert(id) {
self.work_set.push(id);
}
}
/// This function will deeply traverse the initial type and its dependencies to collect the relevant type_ids
fn collect_types(&mut self, metadata: &Metadata, id: u32) {
self.push_to_workset(id);
while let Some(typ) = self.work_set.pop() {
let typ = resolve_typ(metadata, typ);
match &typ.ty.type_def {
TypeDef::Composite(TypeDefComposite { fields }) => {
for field in fields {
self.push_to_workset(field.ty.id);
}
}
StorageEntryType::Map {
key_ty, value_ty, ..
} => {
type_ids.insert(key_ty);
type_ids.insert(value_ty);
TypeDef::Variant(TypeDefVariant { variants }) => {
for variant in variants {
for field in &variant.fields {
self.push_to_workset(field.ty.id);
}
}
}
TypeDef::Array(TypeDefArray { len: _, type_param })
| TypeDef::Sequence(TypeDefSequence { type_param })
| TypeDef::Compact(TypeDefCompact { type_param }) => {
self.push_to_workset(type_param.id);
}
TypeDef::Tuple(TypeDefTuple { fields }) => {
for field in fields {
self.push_to_workset(field.id);
}
}
TypeDef::Primitive(_) => (),
TypeDef::BitSequence(TypeDefBitSequence {
bit_store_type,
bit_order_type,
}) => {
for typ in [bit_order_type, bit_store_type] {
self.push_to_workset(typ.id);
}
}
}
}
}
if let Some(ty) = pallet.call_ty {
type_ids.insert(ty);
fn collect_extrinsic_types(&mut self, extrinsic: &ExtrinsicMetadata) {
for ty in [
extrinsic.address_ty,
extrinsic.call_ty,
extrinsic.signature_ty,
extrinsic.extra_ty,
] {
self.insert(ty);
}
for signed in &extrinsic.signed_extensions {
self.insert(signed.extra_ty);
self.insert(signed.additional_ty);
}
}
if let Some(ty) = pallet.event_ty {
type_ids.insert(ty);
/// Collect all type IDs needed to represent the runtime APIs.
fn collect_runtime_api_types(&mut self, metadata: &Metadata, api: &RuntimeApiMetadataInner) {
for method in api.methods.values() {
self.collect_types(metadata, method.output_ty);
}
}
for constant in pallet.constants.values() {
type_ids.insert(constant.ty);
}
if let Some(ty) = pallet.error_ty {
type_ids.insert(ty);
}
}
/// Update all type IDs of the provided pallet using the new type IDs from the portable registry.
fn update_pallet_types(pallet: &mut PalletMetadataInner, map_ids: &BTreeMap<u32, u32>) {
if let Some(storage) = &mut pallet.storage {
for entry in storage.entries.values_mut() {
match &mut entry.entry_type {
StorageEntryType::Plain(ty) => {
update_type(ty, map_ids);
}
StorageEntryType::Map {
key_ty, value_ty, ..
} => {
update_type(key_ty, map_ids);
update_type(value_ty, map_ids);
/// Collect all type IDs needed to represent the provided pallet.
fn collect_pallet_types(&mut self, pallet: &PalletMetadataInner, metadata: &Metadata) {
if let Some(storage) = &pallet.storage {
for entry in storage.entries() {
match entry.entry_type {
StorageEntryType::Plain(ty) => {
self.collect_types(metadata, ty);
}
StorageEntryType::Map {
key_ty, value_ty, ..
} => {
self.collect_types(metadata, key_ty);
self.collect_types(metadata, value_ty);
}
}
}
}
}
if let Some(ty) = &mut pallet.call_ty {
update_type(ty, map_ids);
}
if let Some(ty) = &mut pallet.event_ty {
update_type(ty, map_ids);
}
if let Some(ty) = &mut pallet.error_ty {
update_type(ty, map_ids);
}
for constant in pallet.constants.values_mut() {
update_type(&mut constant.ty, map_ids);
}
}
/// Collect all type IDs needed to represent the extrinsic metadata.
fn collect_extrinsic_types(extrinsic: &ExtrinsicMetadata, type_ids: &mut HashSet<u32>) {
type_ids.insert(extrinsic.address_ty);
type_ids.insert(extrinsic.call_ty);
type_ids.insert(extrinsic.signature_ty);
type_ids.insert(extrinsic.extra_ty);
for signed in &extrinsic.signed_extensions {
type_ids.insert(signed.extra_ty);
type_ids.insert(signed.additional_ty);
}
}
/// Update all type IDs of the provided extrinsic metadata using the new type IDs from the portable registry.
fn update_extrinsic_types(extrinsic: &mut ExtrinsicMetadata, map_ids: &BTreeMap<u32, u32>) {
update_type(&mut extrinsic.address_ty, map_ids);
update_type(&mut extrinsic.call_ty, map_ids);
update_type(&mut extrinsic.signature_ty, map_ids);
update_type(&mut extrinsic.extra_ty, map_ids);
for signed in &mut extrinsic.signed_extensions {
update_type(&mut signed.extra_ty, map_ids);
update_type(&mut signed.additional_ty, map_ids);
}
}
/// Collect all type IDs needed to represent the runtime APIs.
fn collect_runtime_api_types(api: &RuntimeApiMetadataInner, type_ids: &mut HashSet<u32>) {
for method in api.methods.values() {
for input in &method.inputs {
type_ids.insert(input.ty);
}
type_ids.insert(method.output_ty);
}
}
/// Update all type IDs of the provided runtime APIs metadata using the new type IDs from the portable registry.
fn update_runtime_api_types(apis: &mut [RuntimeApiMetadataInner], map_ids: &BTreeMap<u32, u32>) {
for api in apis {
for method in api.methods.values_mut() {
for input in &mut method.inputs {
update_type(&mut input.ty, map_ids);
}
update_type(&mut method.output_ty, map_ids);
for constant in pallet.constants.values() {
self.collect_types(metadata, constant.ty);
}
}
}
/// Collect the outer enums type IDs.
fn collect_outer_enums(enums: &OuterEnumsMetadata, type_ids: &mut HashSet<u32>) {
type_ids.insert(enums.call_enum_ty);
type_ids.insert(enums.event_enum_ty);
type_ids.insert(enums.error_enum_ty);
}
/// Update all the type IDs for outer enums.
fn update_outer_enums(enums: &mut OuterEnumsMetadata, map_ids: &BTreeMap<u32, u32>) {
update_type(&mut enums.call_enum_ty, map_ids);
update_type(&mut enums.event_enum_ty, map_ids);
update_type(&mut enums.error_enum_ty, map_ids);
}
/// Update the given type using the new type ID from the portable registry.
///
/// # Panics
///
/// Panics if the [`scale_info::PortableRegistry`] did not retain all needed types.
fn update_type(ty: &mut u32, map_ids: &BTreeMap<u32, u32>) {
let old_id = *ty;
let new_id = map_ids
.get(&old_id)
.copied()
.unwrap_or_else(|| panic!("PortableRegistry did not retain type id {old_id}. This is a bug. Please open an issue."));
*ty = new_id;
}
/// Retain the enum type identified by ID and keep only the variants that
/// match the provided filter.
fn retain_variants_in_enum_type<F>(metadata: &mut Metadata, id: u32, mut filter: F)
where
F: FnMut(&str) -> bool,
{
let ty = metadata
fn resolve_typ(metadata: &Metadata, typ: u32) -> &PortableType {
metadata
.types
.types
.get_mut(id as usize)
.expect("Metadata should contain enum type in registry");
let TypeDef::Variant(variant) = &mut ty.ty.type_def else {
panic!("Metadata type is expected to be a variant type");
};
// Remove all variants from the type that aren't the pallet(s) we want to keep.
variant.variants.retain(|v| filter(&v.name));
}
/// Strip any pallets out of the outer enum types that aren't the ones we want to keep.
fn retain_pallets_in_runtime_outer_types<F>(metadata: &mut Metadata, mut filter: F)
where
F: FnMut(&str) -> bool,
{
retain_variants_in_enum_type(metadata, metadata.outer_enums.call_enum_ty, &mut filter);
retain_variants_in_enum_type(metadata, metadata.outer_enums.event_enum_ty, &mut filter);
retain_variants_in_enum_type(metadata, metadata.outer_enums.error_enum_ty, &mut filter);
.get(typ as usize)
.expect("Metadata should contain enum type in registry")
}
/// Generate a subset of the metadata that contains only the
@@ -208,27 +160,10 @@ pub fn retain_metadata<F, G>(
F: FnMut(&str) -> bool,
G: FnMut(&str) -> bool,
{
let mut type_ids = HashSet::new();
// There are special outer enum types that point to all pallets types (call, error, event) by default.
// This brings in a significant chunk of types. We trim this down to only include variants
// for the pallets we're retaining, to avoid this.
retain_pallets_in_runtime_outer_types(metadata, &mut pallets_filter);
// Collect the stripped outer enums.
collect_outer_enums(&metadata.outer_enums, &mut type_ids);
// Filter our pallet list to only those pallets we want to keep. Keep hold of all
// type IDs in the pallets we're keeping. Retain all, if no filter specified.
metadata.pallets.retain(|pallet| {
let should_retain = pallets_filter(&pallet.name);
if should_retain {
collect_pallet_types(pallet, &mut type_ids);
}
should_retain
});
// We index pallets by their u8 index for easy access. Rebuild this index.
// 1. Delete pallets we don't want to keep.
metadata
.pallets
.retain(|pallet| pallets_filter(&pallet.name));
metadata.pallets_by_index = metadata
.pallets
.values()
@@ -237,58 +172,188 @@ pub fn retain_metadata<F, G>(
.map(|(pos, p)| (p.index, pos))
.collect();
// Keep the extrinsic stuff referenced in our metadata.
collect_extrinsic_types(&metadata.extrinsic, &mut type_ids);
// 2. Delete runtime APIs we don't want to keep.
metadata.apis.retain(|api| runtime_apis_filter(&api.name));
// Keep the "runtime" type ID, since it's referenced in our metadata.
type_ids.insert(metadata.runtime_ty);
// Keep only the runtime API types that the filter allows for. Keep hold of all
// type IDs in the runtime apis we're keeping. Retain all, if no filter specified.
metadata.apis.retain(|api| {
let should_retain = runtime_apis_filter(&api.name);
if should_retain {
collect_runtime_api_types(api, &mut type_ids);
// 3. For each outer enum type, strip it if possible, ie if it is not returned by any
// of the things we're keeping (because if it is, we need to keep all of it so that we
// can still decode values into it).
let outer_enums = metadata.outer_enums();
let mut find_type_id = keep_outer_enum(metadata, &mut pallets_filter, &mut runtime_apis_filter);
for outer_enum_ty_id in [
outer_enums.call_enum_ty(),
outer_enums.error_enum_ty(),
outer_enums.event_enum_ty(),
] {
if !find_type_id(outer_enum_ty_id) {
strip_variants_in_enum_type(metadata, &mut pallets_filter, outer_enum_ty_id);
}
should_retain
});
}
// Additionally, subxt depends on the `DispatchError` type existing; we use the same
// logic here that is used when building our `Metadata`.
// 4. Collect all of the type IDs we still want to keep after deleting.
let mut keep_these_type_ids: BTreeSet<u32> =
iterate_metadata_types(metadata).map(|x| *x).collect();
// 5. Additionally, subxt depends on the `DispatchError` type existing; we use the same
// logic here that is used when building our `Metadata` to ensure we keep it too.
let dispatch_error_ty = metadata
.types
.types
.iter()
.find(|ty| ty.ty.path.segments == ["sp_runtime", "DispatchError"])
.expect("Metadata must contain sp_runtime::DispatchError");
type_ids.insert(dispatch_error_ty.id);
// Now, keep the type IDs we've asked for. This recursively keeps any types referenced from these.
// This will return a map from old to new type ID, because IDs may change.
let map_ids = metadata.types.retain(|id| type_ids.contains(&id));
keep_these_type_ids.insert(dispatch_error_ty.id);
// And finally, we can go and update all of our type IDs in the metadata as a result of this:
update_outer_enums(&mut metadata.outer_enums, &map_ids);
for pallets in metadata.pallets.values_mut() {
update_pallet_types(pallets, &map_ids);
// 5. Strip all of the type IDs we no longer need, based on the above set.
let map_ids = metadata
.types
.retain(|id| keep_these_type_ids.contains(&id));
// 6. Now, update the type IDs referenced in our metadata to reflect this.
for id in iterate_metadata_types(metadata) {
if let Some(new_id) = map_ids.get(id) {
*id = *new_id;
} else {
panic!("Type id {id} was not retained. This is a bug");
}
}
update_extrinsic_types(&mut metadata.extrinsic, &map_ids);
update_type(&mut metadata.runtime_ty, &map_ids);
update_runtime_api_types(metadata.apis.values_mut(), &map_ids);
}
/// Remove from the enum type identified by `id` every variant whose name does
/// not pass `pallets_filter`.
///
/// # Panics
///
/// Panics if the type is missing from the registry, or if it is not a variant
/// (enum) type.
fn strip_variants_in_enum_type<F>(metadata: &mut Metadata, mut pallets_filter: F, id: u32)
where
    F: FnMut(&str) -> bool,
{
    let portable_ty = metadata
        .types
        .types
        .get_mut(id as usize)
        .expect("Metadata should contain enum type in registry");

    match &mut portable_ty.ty.type_def {
        TypeDef::Variant(def) => def.variants.retain(|variant| pallets_filter(&variant.name)),
        _ => panic!("Metadata type is expected to be a variant type"),
    }
}
/// Returns an iterator that allows modifying each type ID seen in the metadata (not recursively).
/// This will iterate over every type referenced in the metadata outside of `metadata.types`.
///
/// NOTE(review): the remapping step in `retain_metadata` rewrites exactly the IDs
/// yielded here; any metadata field holding a type ID that is not pushed below would
/// be left pointing at a stale (pre-retain) ID. Keep this list exhaustive.
fn iterate_metadata_types(metadata: &mut Metadata) -> impl Iterator<Item = &mut u32> {
let mut types = alloc::vec::Vec::new();
// collect outer_enum top-level types
let outer_enum = &mut metadata.outer_enums;
types.push(&mut outer_enum.call_enum_ty);
types.push(&mut outer_enum.event_enum_ty);
types.push(&mut outer_enum.error_enum_ty);
// collect pallet top-level type ids
for pallet in metadata.pallets.values_mut() {
// Storage entries: plain values hold one type ID, maps hold key and value IDs.
if let Some(storage) = &mut pallet.storage {
for entry in storage.entries.values_mut() {
match &mut entry.entry_type {
StorageEntryType::Plain(ty) => {
types.push(ty);
}
StorageEntryType::Map {
key_ty, value_ty, ..
} => {
types.push(key_ty);
types.push(value_ty);
}
}
}
};
// Call/event/error enums are optional per pallet.
if let Some(ty) = &mut pallet.call_ty {
types.push(ty);
}
if let Some(ty) = &mut pallet.event_ty {
types.push(ty);
}
if let Some(ty) = &mut pallet.error_ty {
types.push(ty);
}
for constant in pallet.constants.values_mut() {
types.push(&mut constant.ty);
}
}
// collect extrinsic type_ids
for ty in [
&mut metadata.extrinsic.extra_ty,
&mut metadata.extrinsic.address_ty,
&mut metadata.extrinsic.signature_ty,
&mut metadata.extrinsic.call_ty,
] {
types.push(ty);
}
// Each signed extension contributes its "extra" and "additional" type IDs.
for signed in &mut metadata.extrinsic.signed_extensions {
types.push(&mut signed.extra_ty);
types.push(&mut signed.additional_ty);
}
types.push(&mut metadata.runtime_ty);
// collect runtime_api_types: every method's input and output type IDs.
for api in metadata.apis.values_mut() {
for method in api.methods.values_mut() {
for input in &mut method.inputs.iter_mut() {
types.push(&mut input.ty);
}
types.push(&mut method.output_ty);
}
}
types.into_iter()
}
/// Look for a type ID anywhere that we can be given back, ie in constants, storage, extrinsics or runtime API return types.
/// This will recurse deeply into those type IDs to find them.
///
/// The returned closure answers, for a given type ID, whether that type is
/// (transitively) reachable from the retained pallets, retained runtime APIs,
/// or the extrinsic metadata — i.e. whether it must be kept intact so that
/// values of it can still be decoded.
pub fn keep_outer_enum<F, G>(
metadata: &Metadata,
pallets_filter: &mut F,
runtime_apis_filter: &mut G,
) -> impl FnMut(u32) -> bool
where
F: FnMut(&str) -> bool,
G: FnMut(&str) -> bool,
{
let mut type_set = TypeSet::new();
// Deeply collect every type reachable from the pallets we're keeping.
for pallet in metadata.pallets.values() {
if pallets_filter(&pallet.name) {
type_set.collect_pallet_types(pallet, metadata);
}
}
// Deeply collect the return types of the runtime APIs we're keeping.
for api in metadata.apis.values() {
if runtime_apis_filter(&api.name) {
type_set.collect_runtime_api_types(metadata, api);
}
}
// Extrinsic types are always kept.
type_set.collect_extrinsic_types(&metadata.extrinsic);
move |type_id| type_set.contains(type_id)
}
#[cfg(test)]
mod tests {
use super::*;
use crate::Metadata;
use assert_matches::assert_matches;
use codec::Decode;
use frame_metadata::{RuntimeMetadata, RuntimeMetadataPrefixed};
use std::{fs, path::Path};
fn load_metadata() -> Metadata {
let bytes = fs::read(Path::new("../artifacts/polkadot_metadata_full.scale"))
.expect("Cannot read metadata blob");
load_metadata_custom("../artifacts/polkadot_metadata_full.scale")
}
fn load_metadata_custom(path: impl AsRef<Path>) -> Metadata {
let bytes = fs::read(path).expect("Cannot read metadata blob");
let meta: RuntimeMetadataPrefixed =
Decode::decode(&mut &*bytes).expect("Cannot decode scale metadata");
@@ -305,8 +370,8 @@ mod tests {
// Retain one pallet at a time ensuring the test does not panic.
for pallet in metadata_cache.pallets() {
let original_meta = metadata_cache.clone();
let mut metadata = metadata_cache.clone();
retain_metadata(
&mut metadata,
|pallet_name| pallet_name == pallet.name(),
@@ -319,20 +384,11 @@ mod tests {
pallet.name()
);
let id = metadata.outer_enums().call_enum_ty;
let ty = metadata.types.resolve(id).unwrap();
let num_variants = if pallet.call_ty_id().is_some() { 1 } else { 0 };
assert_matches!(&ty.type_def, TypeDef::Variant(variant) if variant.variants.len() == num_variants);
let id = metadata.outer_enums().error_enum_ty;
let ty = metadata.types.resolve(id).unwrap();
let num_variants = if pallet.error_ty_id().is_some() { 1 } else { 0 };
assert_matches!(&ty.type_def, TypeDef::Variant(variant) if variant.variants.len() == num_variants);
let id = metadata.outer_enums().event_enum_ty;
let ty = metadata.types.resolve(id).unwrap();
let num_variants = if pallet.event_ty_id().is_some() { 1 } else { 0 };
assert_matches!(&ty.type_def, TypeDef::Variant(variant) if variant.variants.len() == num_variants);
assert!(
metadata.types.types.len() < original_meta.types.types.len(),
"Stripped metadata must have less retained types than the non-stripped one: stripped amount {}, original amount {}",
metadata.types.types.len(), original_meta.types.types.len()
);
}
}
@@ -356,4 +412,33 @@ mod tests {
);
}
}
#[test]
fn issue_1659() {
// Regression test for issue #1659: stripping pallets must not mutate the outer
// enum types when those enums are still reachable from retained runtime APIs,
// since values of them could then no longer be decoded.
let full_metadata = load_metadata_custom("../artifacts/regressions/1659.scale");
// Strip metadata to the pallets as described in the issue.
let mut stripped_metadata = full_metadata.clone();
retain_metadata(
&mut stripped_metadata,
{
let set = "Balances,Timestamp,Contracts,ContractsEvm,System"
.split(",")
.collect::<BTreeSet<&str>>();
move |s| set.contains(&s)
},
|_| true,
);
// check that call_enum did not change as it is referenced inside runtime_api
assert_eq!(
stripped_metadata.type_hash(stripped_metadata.outer_enums.call_enum_ty),
full_metadata.type_hash(full_metadata.outer_enums.call_enum_ty)
);
// check that event_enum did not change as it is referenced inside runtime_api
assert_eq!(
stripped_metadata.type_hash(stripped_metadata.outer_enums.event_enum_ty),
full_metadata.type_hash(full_metadata.outer_enums.event_enum_ty)
);
}
}
+5 -1
View File
@@ -589,7 +589,11 @@ impl<'a> MetadataHasher<'a> {
// Get the hashes of outer enums, considering only `specific_pallets` (if any are set).
// If any of the typed that represent outer enums are encountered later, hashes from `top_level_enum_hashes` can be substituted.
let outer_enum_hashes = OuterEnumHashes::new(metadata, self.specific_pallets.as_deref());
let outer_enum_hashes = OuterEnumHashes::new(
metadata,
self.specific_pallets.as_deref(),
self.specific_runtime_apis.as_deref(),
);
let pallet_hash = metadata.pallets().fold([0u8; HASH_LEN], |bytes, pallet| {
// If specific pallets are given, only include this pallet if it is in the specific pallets.
@@ -5,7 +5,10 @@ use hashbrown::HashMap;
use scale_info::{PortableRegistry, TypeDef};
use crate::{
utils::validation::{get_type_def_variant_hash, get_type_hash},
utils::{
retain,
validation::{get_type_def_variant_hash, get_type_hash},
},
Metadata,
};
@@ -20,7 +23,28 @@ pub struct OuterEnumHashes {
impl OuterEnumHashes {
/// Constructs new `OuterEnumHashes` from metadata. If `only_these_variants` is set, the enums are stripped down to only these variants, before their hashes are calculated.
pub fn new(metadata: &Metadata, only_these_variants: Option<&[&str]>) -> Self {
pub fn new(
metadata: &Metadata,
specific_pallets: Option<&[&str]>,
specific_runtimes: Option<&[&str]>,
) -> Self {
let filter = |names: Option<&[&str]>, name: &str| match names {
Some(names) => names.contains(&name),
None => true,
};
let mut check_enum_type_id = retain::keep_outer_enum(
metadata,
&mut |name| filter(specific_pallets, name),
&mut |name| filter(specific_runtimes, name),
);
let variants = |filter: bool| {
if !filter {
specific_pallets
} else {
None
}
};
fn get_enum_hash(
registry: &PortableRegistry,
id: u32,
@@ -46,9 +70,12 @@ impl OuterEnumHashes {
}
let enums = &metadata.outer_enums;
let call_hash = get_enum_hash(metadata.types(), enums.call_enum_ty, only_these_variants);
let event_hash = get_enum_hash(metadata.types(), enums.event_enum_ty, only_these_variants);
let error_hash = get_enum_hash(metadata.types(), enums.error_enum_ty, only_these_variants);
let call_variants = variants(check_enum_type_id(enums.call_enum_ty));
let call_hash = get_enum_hash(metadata.types(), enums.call_enum_ty, call_variants);
let event_variants = variants(check_enum_type_id(enums.event_enum_ty));
let event_hash = get_enum_hash(metadata.types(), enums.event_enum_ty, event_variants);
let error_variants = variants(check_enum_type_id(enums.error_enum_ty));
let error_hash = get_enum_hash(metadata.types(), enums.error_enum_ty, error_variants);
Self {
call_hash: (enums.call_enum_ty, call_hash),