diff --git a/Cargo.lock b/Cargo.lock index 65ecf50f7e..11549a9869 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -464,7 +464,7 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "artifacts" -version = "0.44.0" +version = "0.50.0" dependencies = [ "substrate-runner", ] @@ -2132,7 +2132,7 @@ dependencies = [ [[package]] name = "generate-custom-metadata" -version = "0.44.0" +version = "0.50.0" dependencies = [ "frame-metadata 23.0.0", "parity-scale-codec", @@ -2753,7 +2753,7 @@ dependencies = [ [[package]] name = "integration-tests" -version = "0.44.0" +version = "0.50.0" dependencies = [ "assert_matches", "cfg_aliases", @@ -5599,7 +5599,7 @@ dependencies = [ [[package]] name = "substrate-runner" -version = "0.44.0" +version = "0.50.0" [[package]] name = "subtle" @@ -5609,25 +5609,31 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "subxt" -version = "0.44.0" +version = "0.50.0" dependencies = [ "assert_matches", "async-trait", + "base58", "bitvec", + "blake2", "derive-where", "either", + "frame-decode", "frame-metadata 23.0.0", "futures", "hex", "http-body", "hyper", + "impl-serde", "jsonrpsee", + "keccak-hash", "parity-scale-codec", "primitive-types", "scale-bits", "scale-decode", "scale-encode", "scale-info", + "scale-info-legacy", "scale-value", "serde", "serde_json", @@ -5635,12 +5641,12 @@ dependencies = [ "sp-crypto-hashing", "sp-keyring", "sp-runtime", - "subxt-core", "subxt-lightclient", "subxt-macro", "subxt-metadata", "subxt-rpcs", "subxt-signer", + "subxt-utils-accountid32", "thiserror 2.0.12", "tokio", "tokio-util", @@ -5654,7 +5660,7 @@ dependencies = [ [[package]] name = "subxt-cli" -version = "0.44.0" +version = "0.50.0" dependencies = [ "clap", "color-eyre", @@ -5688,7 +5694,7 @@ dependencies = [ [[package]] name = "subxt-codegen" -version = "0.44.0" +version = "0.50.0" dependencies = [ "frame-metadata 23.0.0", "getrandom 0.2.16", @@ -5703,65 +5709,9 @@ 
dependencies = [ "thiserror 2.0.12", ] -[[package]] -name = "subxt-core" -version = "0.44.0" -dependencies = [ - "assert_matches", - "base58", - "bitvec", - "blake2", - "derive-where", - "frame-decode", - "frame-metadata 23.0.0", - "hashbrown 0.14.5", - "hex", - "impl-serde", - "keccak-hash", - "parity-scale-codec", - "primitive-types", - "scale-bits", - "scale-decode", - "scale-encode", - "scale-info", - "scale-value", - "serde", - "serde_json", - "sp-core", - "sp-crypto-hashing", - "sp-keyring", - "subxt-macro", - "subxt-metadata", - "subxt-signer", - "thiserror 2.0.12", - "tracing", -] - -[[package]] -name = "subxt-historic" -version = "0.0.8" -dependencies = [ - "frame-decode", - "frame-metadata 23.0.0", - "futures", - "hex", - "parity-scale-codec", - "primitive-types", - "scale-decode", - "scale-info", - "scale-info-legacy", - "scale-type-resolver", - "scale-value", - "sp-crypto-hashing", - "subxt-rpcs", - "thiserror 2.0.12", - "tokio", - "url", -] - [[package]] name = "subxt-lightclient" -version = "0.44.0" +version = "0.50.0" dependencies = [ "futures", "futures-timer", @@ -5786,7 +5736,7 @@ dependencies = [ [[package]] name = "subxt-macro" -version = "0.44.0" +version = "0.50.0" dependencies = [ "darling", "parity-scale-codec", @@ -5806,7 +5756,7 @@ dependencies = [ [[package]] name = "subxt-metadata" -version = "0.44.0" +version = "0.50.0" dependencies = [ "bitvec", "criterion", @@ -5823,59 +5773,9 @@ dependencies = [ "thiserror 2.0.12", ] -[[package]] -name = "subxt-new" -version = "0.44.0" -dependencies = [ - "assert_matches", - "async-trait", - "base58", - "bitvec", - "blake2", - "derive-where", - "either", - "frame-decode", - "frame-metadata 23.0.0", - "futures", - "hex", - "http-body", - "hyper", - "impl-serde", - "jsonrpsee", - "keccak-hash", - "parity-scale-codec", - "primitive-types", - "scale-bits", - "scale-decode", - "scale-encode", - "scale-info", - "scale-info-legacy", - "scale-value", - "serde", - "serde_json", - "sp-core", - 
"sp-crypto-hashing", - "sp-keyring", - "sp-runtime", - "subxt-lightclient", - "subxt-macro", - "subxt-metadata", - "subxt-rpcs", - "subxt-signer", - "thiserror 2.0.12", - "tokio", - "tokio-util", - "tower", - "tracing", - "tracing-subscriber", - "url", - "wasm-bindgen-futures", - "web-time", -] - [[package]] name = "subxt-rpcs" -version = "0.44.0" +version = "0.50.0" dependencies = [ "derive-where", "finito", @@ -5891,7 +5791,6 @@ dependencies = [ "primitive-types", "serde", "serde_json", - "subxt-core", "subxt-lightclient", "thiserror 2.0.12", "tokio", @@ -5904,7 +5803,7 @@ dependencies = [ [[package]] name = "subxt-signer" -version = "0.44.0" +version = "0.50.0" dependencies = [ "base64 0.22.1", "bip32", @@ -5930,22 +5829,39 @@ dependencies = [ "sp-core", "sp-crypto-hashing", "sp-keyring", - "subxt-core", + "subxt", + "subxt-utils-accountid32", "thiserror 2.0.12", "zeroize", ] [[package]] name = "subxt-test-macro" -version = "0.44.0" +version = "0.50.0" dependencies = [ "quote", "syn 2.0.101", ] +[[package]] +name = "subxt-utils-accountid32" +version = "0.50.0" +dependencies = [ + "base58", + "blake2", + "parity-scale-codec", + "scale-decode", + "scale-encode", + "scale-info", + "serde", + "sp-core", + "sp-keyring", + "thiserror 2.0.12", +] + [[package]] name = "subxt-utils-fetchmetadata" -version = "0.44.0" +version = "0.50.0" dependencies = [ "frame-metadata 23.0.0", "hex", @@ -5958,7 +5874,7 @@ dependencies = [ [[package]] name = "subxt-utils-stripmetadata" -version = "0.44.0" +version = "0.50.0" dependencies = [ "either", "frame-metadata 23.0.0", @@ -6041,7 +5957,7 @@ dependencies = [ [[package]] name = "test-runtime" -version = "0.44.0" +version = "0.50.0" dependencies = [ "hex", "impl-serde", @@ -6190,9 +6106,7 @@ dependencies = [ "bytes", "libc", "mio", - "parking_lot", "pin-project-lite", - "signal-hook-registry", "socket2", "tokio-macros", "windows-sys 0.52.0", @@ -6469,7 +6383,7 @@ checksum = 
"1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" [[package]] name = "ui-tests" -version = "0.44.0" +version = "0.50.0" dependencies = [ "frame-metadata 23.0.0", "generate-custom-metadata", diff --git a/Cargo.toml b/Cargo.toml index a1fc16a6cf..11871e3e84 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,11 +1,9 @@ [workspace] members = [ - "new", + "subxt", "cli", "codegen", - "core", "lightclient", - "historic", "testing/substrate-runner", "testing/test-runtime", "testing/integration-tests", @@ -16,10 +14,10 @@ members = [ "metadata", "rpcs", "signer", - "subxt", "scripts/artifacts", "utils/fetch-metadata", "utils/strip-metadata", + "utils/accountid32" ] # We exclude any crates that would depend on non mutually @@ -39,7 +37,7 @@ resolver = "2" [workspace.package] authors = ["Parity Technologies "] edition = "2024" -version = "0.44.0" +version = "0.50.0" rust-version = "1.85.0" license = "Apache-2.0 OR GPL-3.0" repository = "https://github.com/paritytech/subxt" @@ -157,16 +155,16 @@ sp-state-machine = { version = "0.45.0", default-features = false } sp-runtime = { version = "41.1.0", default-features = false } # Subxt workspace crates: -subxt = { version = "0.44.0", path = "subxt", default-features = false } -subxt-core = { version = "0.44.0", path = "core", default-features = false } -subxt-macro = { version = "0.44.0", path = "macro" } -subxt-metadata = { version = "0.44.0", path = "metadata", default-features = false } -subxt-codegen = { version = "0.44.0", path = "codegen" } -subxt-signer = { version = "0.44.0", path = "signer", default-features = false } -subxt-rpcs = { version = "0.44.0", path = "rpcs", default-features = false } -subxt-lightclient = { version = "0.44.0", path = "lightclient", default-features = false } -subxt-utils-fetchmetadata = { version = "0.44.0", path = "utils/fetch-metadata", default-features = false } -subxt-utils-stripmetadata = { version = "0.44.0", path = "utils/strip-metadata", default-features = false } +subxt = { 
version = "0.50.0", path = "subxt", default-features = false } +subxt-macro = { version = "0.50.0", path = "macro" } +subxt-metadata = { version = "0.50.0", path = "metadata", default-features = false } +subxt-codegen = { version = "0.50.0", path = "codegen" } +subxt-signer = { version = "0.50.0", path = "signer", default-features = false } +subxt-rpcs = { version = "0.50.0", path = "rpcs", default-features = false } +subxt-lightclient = { version = "0.50.0", path = "lightclient", default-features = false } +subxt-utils-fetchmetadata = { version = "0.50.0", path = "utils/fetch-metadata", default-features = false } +subxt-utils-stripmetadata = { version = "0.50.0", path = "utils/strip-metadata", default-features = false } +subxt-utils-accountid32 = { version = "0.50.0", path = "utils/accountid32", default-features = false } test-runtime = { path = "testing/test-runtime" } substrate-runner = { path = "testing/substrate-runner" } diff --git a/codegen/src/api/calls.rs b/codegen/src/api/calls.rs index 25b48ec03e..b0272d48b9 100644 --- a/codegen/src/api/calls.rs +++ b/codegen/src/api/calls.rs @@ -17,7 +17,7 @@ use subxt_metadata::PalletMetadata; /// /// - `type_gen` - [`scale_typegen::TypeGenerator`] that contains settings and all types from the runtime metadata. /// - `pallet` - Pallet metadata from which the calls are generated. -/// - `crate_path` - The crate path under which the `subxt-core` crate is located, e.g. `::subxt::ext::subxt_core` when using subxt as a dependency. +/// - `crate_path` - The crate path under which the `subxt` crate is located, e.g. `::subxt` when using subxt as a dependency. 
pub fn generate_calls( type_gen: &TypeGenerator, pallet: &PalletMetadata, @@ -81,9 +81,9 @@ pub fn generate_calls( #struct_def #alias_mod - impl #crate_path::blocks::StaticExtrinsic for #struct_name { - const PALLET: &'static str = #pallet_name; - const CALL: &'static str = #call_name; + impl #crate_path::extrinsics::DecodeAsExtrinsic for #struct_name { + const PALLET_NAME: &'static str = #pallet_name; + const CALL_NAME: &'static str = #call_name; } }; @@ -92,8 +92,8 @@ pub fn generate_calls( pub fn #fn_name( &self, #( #call_fn_args, )* - ) -> #crate_path::tx::payload::StaticPayload { - #crate_path::tx::payload::StaticPayload::new_static( + ) -> #crate_path::transactions::payload::StaticPayload { + #crate_path::transactions::payload::StaticPayload::new_static( #pallet_name, #call_name, types::#struct_name { #( #call_args, )* }, diff --git a/codegen/src/api/constants.rs b/codegen/src/api/constants.rs index 7bc2b409d6..062eb730ea 100644 --- a/codegen/src/api/constants.rs +++ b/codegen/src/api/constants.rs @@ -32,7 +32,7 @@ use super::CodegenError; /// /// - `type_gen` - [`scale_typegen::TypeGenerator`] that contains settings and all types from the runtime metadata. /// - `pallet` - Pallet metadata from which the constants are generated. -/// - `crate_path` - The crate path under which the `subxt-core` crate is located, e.g. `::subxt::ext::subxt_core` when using subxt as a dependency. +/// - `crate_path` - The crate path under which the `subxt` crate is located, e.g. `::subxt` when using subxt as a dependency. pub fn generate_constants( type_gen: &TypeGenerator, pallet: &PalletMetadata, diff --git a/codegen/src/api/events.rs b/codegen/src/api/events.rs index 0fe92307d9..2cd036463b 100644 --- a/codegen/src/api/events.rs +++ b/codegen/src/api/events.rs @@ -37,7 +37,7 @@ use subxt_metadata::PalletMetadata; /// /// - `type_gen` - [`scale_typegen::TypeGenerator`] that contains settings and all types from the runtime metadata. 
/// - `pallet` - Pallet metadata from which the events are generated. -/// - `crate_path` - The crate path under which the `subxt-core` crate is located, e.g. `::subxt::ext::subxt_core` when using subxt as a dependency. +/// - `crate_path` - The crate path under which the `subxt` crate is located, e.g. `::subxt` when using subxt as a dependency. pub fn generate_events( type_gen: &TypeGenerator, pallet: &PalletMetadata, @@ -63,9 +63,9 @@ pub fn generate_events( #struct_def #alias_mod - impl #crate_path::events::StaticEvent for #event_struct_name { - const PALLET: &'static str = #pallet_name; - const EVENT: &'static str = #event_name; + impl #crate_path::events::DecodeAsEvent for #event_struct_name { + const PALLET_NAME: &'static str = #pallet_name; + const EVENT_NAME: &'static str = #event_name; } } }); diff --git a/codegen/src/api/mod.rs b/codegen/src/api/mod.rs index 7602f9d363..a799f10d0e 100644 --- a/codegen/src/api/mod.rs +++ b/codegen/src/api/mod.rs @@ -287,11 +287,16 @@ impl RuntimeGenerator { StorageApi } + /// This is an alias to [`Self::transactions()`]. 
pub fn tx() -> TransactionApi { TransactionApi } - pub fn apis() -> runtime_apis::RuntimeApi { + pub fn transactions() -> TransactionApi { + TransactionApi + } + + pub fn runtime_apis() -> runtime_apis::RuntimeApi { runtime_apis::RuntimeApi } @@ -301,7 +306,7 @@ impl RuntimeGenerator { ViewFunctionsApi } - pub fn custom() -> CustomValuesApi { + pub fn custom_values() -> CustomValuesApi { CustomValuesApi } diff --git a/codegen/src/api/runtime_apis.rs b/codegen/src/api/runtime_apis.rs index 4bfe1bff61..9a80b9bd5a 100644 --- a/codegen/src/api/runtime_apis.rs +++ b/codegen/src/api/runtime_apis.rs @@ -183,11 +183,11 @@ fn generate_runtime_api( pub fn #method_name( &self, #(#input_args),* - ) -> #crate_path::runtime_api::payload::StaticPayload< + ) -> #crate_path::runtime_apis::payload::StaticPayload< (#(#input_tuple_types,)*), #method_name::output::Output > { - #crate_path::runtime_api::payload::StaticPayload::new_static( + #crate_path::runtime_apis::payload::StaticPayload::new_static( #trait_name_str, #method_name_str, (#(#input_param_names,)*), diff --git a/codegen/src/api/storage.rs b/codegen/src/api/storage.rs index 3ef3cd041a..3cbdbdea11 100644 --- a/codegen/src/api/storage.rs +++ b/codegen/src/api/storage.rs @@ -19,7 +19,7 @@ use scale_typegen::typegen::ir::ToTokensWithSettings; /// /// - `type_gen` - [`scale_typegen::TypeGenerator`] that contains settings and all types from the runtime metadata. /// - `pallet` - Pallet metadata from which the storage items are generated. -/// - `crate_path` - The crate path under which the `subxt-core` crate is located, e.g. `::subxt::ext::subxt_core` when using subxt as a dependency. +/// - `crate_path` - The crate path under which the `subxt` crate is located, e.g. `::subxt` when using subxt as a dependency. 
pub fn generate_storage( type_gen: &TypeGenerator, pallet: &PalletMetadata, diff --git a/codegen/src/lib.rs b/codegen/src/lib.rs index 33161ae850..9b9a3fbf02 100644 --- a/codegen/src/lib.rs +++ b/codegen/src/lib.rs @@ -7,7 +7,6 @@ //! be used directly if preferable. #![deny(missing_docs)] -#![cfg_attr(docsrs, feature(doc_cfg))] mod api; pub mod error; @@ -71,7 +70,7 @@ pub struct CodegenBuilder { impl Default for CodegenBuilder { fn default() -> Self { CodegenBuilder { - crate_path: syn::parse_quote!(::subxt::ext::subxt_core), + crate_path: syn::parse_quote!(::subxt), use_default_derives: true, use_default_substitutions: true, generate_docs: true, @@ -216,7 +215,7 @@ impl CodegenBuilder { self.item_mod = item_mod; } - /// Set the path to the `subxt` crate. By default, we expect it to be at `::subxt::ext::subxt_core`. + /// Set the path to the `subxt` crate. By default, we expect it to be at `::subxt`. /// /// # Panics /// @@ -232,9 +231,9 @@ impl CodegenBuilder { self.crate_path = crate_path; } - /// Generate an interface, assuming that the default path to the `subxt` crate is `::subxt::ext::subxt_core`. + /// Generate an interface, assuming that the default path to the `subxt` crate is `::subxt`. /// If the `subxt` crate is not available as a top level dependency, use `generate` and provide - /// a valid path to the `subxt¦ crate. + /// a valid path to the `subxt` crate. pub fn generate(self, metadata: Metadata) -> Result { let crate_path = self.crate_path; @@ -300,7 +299,7 @@ impl CodegenBuilder { /// The default [`scale_typegen::TypeGeneratorSettings`], subxt is using for generating code. /// Useful for emulating subxt's code generation settings from e.g. subxt-explorer. 
pub fn default_subxt_type_gen_settings() -> TypeGeneratorSettings { - let crate_path: syn::Path = parse_quote!(::subxt::ext::subxt_core); + let crate_path: syn::Path = parse_quote!(::subxt); let derives = default_derives(&crate_path); let substitutes = default_substitutes(&crate_path); subxt_type_gen_settings(derives, substitutes, &crate_path, true) diff --git a/core/Cargo.toml b/core/Cargo.toml deleted file mode 100644 index f1021bdb0a..0000000000 --- a/core/Cargo.toml +++ /dev/null @@ -1,82 +0,0 @@ -[package] -name = "subxt-core" -version.workspace = true -authors.workspace = true -edition.workspace = true -rust-version.workspace = true -publish = true - -license.workspace = true -readme = "README.md" -repository.workspace = true -documentation.workspace = true -homepage.workspace = true -description = "A no-std compatible subset of Subxt's functionality" -keywords = ["parity", "subxt", "extrinsic", "no-std"] - -[features] -default = ["std"] -std = [ - "codec/std", - "scale-info/std", - "frame-metadata/std", - "subxt-metadata/std", - "hex/std", - "serde/std", - "serde_json/std", - "tracing/std", - "impl-serde/std", - "primitive-types/std", - "sp-core/std", - "sp-keyring/std", - "sp-crypto-hashing/std", -] - -[dependencies] -codec = { package = "parity-scale-codec", workspace = true, default-features = false, features = ["derive"] } -frame-decode = { workspace = true } -scale-info = { workspace = true, default-features = false, features = ["bit-vec"] } -scale-value = { workspace = true, default-features = false } -scale-bits = { workspace = true, default-features = false } -scale-decode = { workspace = true, default-features = false, features = ["derive", "primitive-types"] } -scale-encode = { workspace = true, default-features = false, features = ["derive", "primitive-types", "bits"] } -frame-metadata = { workspace = true, default-features = false } -subxt-metadata = { workspace = true, default-features = false } -derive-where = { workspace = true } -hex = { 
workspace = true } -serde = { workspace = true, default-features = false, features = ["derive"] } -serde_json = { workspace = true, default-features = false, features = ["raw_value", "alloc"] } -tracing = { workspace = true, default-features = false } -sp-crypto-hashing = { workspace = true } -hashbrown = { workspace = true } -thiserror = { workspace = true, default-features = false } - -# For ss58 encoding AccountId32 to serialize them properly: -base58 = { workspace = true } -blake2 = { workspace = true } - -# Provides some deserialization, types like U256/H256 and hashing impls like twox/blake256: -impl-serde = { workspace = true, default-features = false } -primitive-types = { workspace = true, default-features = false, features = ["codec", "serde_no_std", "scale-info"] } - -# AccountId20 -keccak-hash = { workspace = true} - -[dev-dependencies] -assert_matches = { workspace = true } -bitvec = { workspace = true } -codec = { workspace = true, features = ["derive", "bit-vec"] } -subxt-macro = { workspace = true } -subxt-signer = { workspace = true, features = ["sr25519", "subxt"] } -sp-core = { workspace = true } -sp-keyring = { workspace = true } -hex = { workspace = true } - -[package.metadata.docs.rs] -default-features = true - -[package.metadata.playground] -default-features = true - -[lints] -workspace = true diff --git a/core/README.md b/core/README.md deleted file mode 100644 index fb25fabfa7..0000000000 --- a/core/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Subxt-Core - -This library provides a no-std compatible subset of functionality that `subxt` and `subxt-signer` rely on. \ No newline at end of file diff --git a/core/src/blocks/extrinsic_transaction_extensions.rs b/core/src/blocks/extrinsic_transaction_extensions.rs deleted file mode 100644 index 12ef7fe156..0000000000 --- a/core/src/blocks/extrinsic_transaction_extensions.rs +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. 
-// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use crate::config::TransactionExtension; -use crate::config::transaction_extensions::{ - ChargeAssetTxPayment, ChargeTransactionPayment, CheckNonce, -}; -use crate::dynamic::Value; -use crate::error::ExtrinsicError; -use crate::{Metadata, config::Config}; -use alloc::borrow::ToOwned; -use frame_decode::extrinsics::ExtrinsicExtensions; -use scale_decode::DecodeAsType; - -/// The signed extensions of an extrinsic. -#[derive(Debug, Clone)] -pub struct ExtrinsicTransactionExtensions<'a, T: Config> { - bytes: &'a [u8], - metadata: &'a Metadata, - decoded_info: &'a ExtrinsicExtensions<'static, u32>, - _marker: core::marker::PhantomData, -} - -impl<'a, T: Config> ExtrinsicTransactionExtensions<'a, T> { - pub(crate) fn new( - bytes: &'a [u8], - metadata: &'a Metadata, - decoded_info: &'a ExtrinsicExtensions<'static, u32>, - ) -> Self { - Self { - bytes, - metadata, - decoded_info, - _marker: core::marker::PhantomData, - } - } - - /// Returns an iterator over each of the signed extension details of the extrinsic. - pub fn iter(&self) -> impl Iterator> + use<'a, T> { - self.decoded_info - .iter() - .map(|s| ExtrinsicTransactionExtension { - bytes: &self.bytes[s.range()], - ty_id: *s.ty(), - identifier: s.name(), - metadata: self.metadata, - _marker: core::marker::PhantomData, - }) - } - - /// Searches through all signed extensions to find a specific one. - /// If the Signed Extension is not found `Ok(None)` is returned. - /// If the Signed Extension is found but decoding failed `Err(_)` is returned. - pub fn find>(&self) -> Result, ExtrinsicError> { - for ext in self.iter() { - match ext.as_signed_extension::() { - // We found a match; return it: - Ok(Some(e)) => return Ok(Some(e)), - // No error, but no match either; next! - Ok(None) => continue, - // Error? 
return it - Err(e) => return Err(e), - } - } - Ok(None) - } - - /// The tip of an extrinsic, extracted from the ChargeTransactionPayment or ChargeAssetTxPayment - /// signed extension, depending on which is present. - /// - /// Returns `None` if `tip` was not found or decoding failed. - pub fn tip(&self) -> Option { - // Note: the overhead of iterating multiple time should be negligible. - self.find::() - .ok() - .flatten() - .map(|e| e.tip()) - .or_else(|| { - self.find::>() - .ok() - .flatten() - .map(|e| e.tip()) - }) - } - - /// The nonce of the account that submitted the extrinsic, extracted from the CheckNonce signed extension. - /// - /// Returns `None` if `nonce` was not found or decoding failed. - pub fn nonce(&self) -> Option { - self.find::().ok()? - } -} - -/// A single signed extension -#[derive(Debug, Clone)] -pub struct ExtrinsicTransactionExtension<'a, T: Config> { - bytes: &'a [u8], - ty_id: u32, - identifier: &'a str, - metadata: &'a Metadata, - _marker: core::marker::PhantomData, -} - -impl<'a, T: Config> ExtrinsicTransactionExtension<'a, T> { - /// The bytes representing this signed extension. - pub fn bytes(&self) -> &'a [u8] { - self.bytes - } - - /// The name of the signed extension. - pub fn name(&self) -> &'a str { - self.identifier - } - - /// The type id of the signed extension. - pub fn type_id(&self) -> u32 { - self.ty_id - } - - /// Signed Extension as a [`scale_value::Value`] - pub fn value(&self) -> Result, ExtrinsicError> { - let value = scale_value::scale::decode_as_type( - &mut &self.bytes[..], - self.ty_id, - self.metadata.types(), - ) - .map_err(|e| ExtrinsicError::CouldNotDecodeTransactionExtension { - name: self.identifier.to_owned(), - error: e.into(), - })?; - Ok(value) - } - - /// Decodes the bytes of this Signed Extension into its associated `Decoded` type. - /// Returns `Ok(None)` if the data we have doesn't match the Signed Extension we're asking to - /// decode with. 
- pub fn as_signed_extension>( - &self, - ) -> Result, ExtrinsicError> { - if !S::matches(self.identifier, self.ty_id, self.metadata.types()) { - return Ok(None); - } - self.as_type::().map(Some) - } - - fn as_type(&self) -> Result { - let value = E::decode_as_type(&mut &self.bytes[..], self.ty_id, self.metadata.types()) - .map_err(|e| ExtrinsicError::CouldNotDecodeTransactionExtension { - name: self.identifier.to_owned(), - error: e, - })?; - Ok(value) - } -} diff --git a/core/src/blocks/extrinsics.rs b/core/src/blocks/extrinsics.rs deleted file mode 100644 index 94de10ade3..0000000000 --- a/core/src/blocks/extrinsics.rs +++ /dev/null @@ -1,644 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use crate::blocks::extrinsic_transaction_extensions::ExtrinsicTransactionExtensions; -use crate::{ - Metadata, - config::{Config, HashFor, Hasher}, - error::{ExtrinsicDecodeErrorAt, ExtrinsicDecodeErrorAtReason, ExtrinsicError}, -}; -use alloc::sync::Arc; -use alloc::vec::Vec; -use frame_decode::extrinsics::Extrinsic; -use scale_decode::{DecodeAsFields, DecodeAsType}; - -pub use crate::blocks::StaticExtrinsic; - -/// The body of a block. -pub struct Extrinsics { - extrinsics: Vec, Vec)>>, - metadata: Metadata, - hasher: T::Hasher, - _marker: core::marker::PhantomData, -} - -impl Extrinsics { - /// Instantiate a new [`Extrinsics`] object, given a vector containing - /// each extrinsic hash (in the form of bytes) and some metadata that - /// we'll use to decode them. - pub fn decode_from( - extrinsics: Vec>, - metadata: Metadata, - ) -> Result { - let hasher = T::Hasher::new(&metadata); - let extrinsics = extrinsics - .into_iter() - .enumerate() - .map(|(extrinsic_index, bytes)| { - let cursor = &mut &*bytes; - - // Try to decode the extrinsic. 
- let decoded_info = - frame_decode::extrinsics::decode_extrinsic(cursor, &metadata, metadata.types()) - .map_err(|error| ExtrinsicDecodeErrorAt { - extrinsic_index, - error: ExtrinsicDecodeErrorAtReason::DecodeError(error), - })? - .into_owned(); - - // We didn't consume all bytes, so decoding probably failed. - if !cursor.is_empty() { - return Err(ExtrinsicDecodeErrorAt { - extrinsic_index, - error: ExtrinsicDecodeErrorAtReason::LeftoverBytes(cursor.to_vec()), - }); - } - - Ok(Arc::new((decoded_info, bytes))) - }) - .collect::>()?; - - Ok(Self { - extrinsics, - hasher, - metadata, - _marker: core::marker::PhantomData, - }) - } - - /// The number of extrinsics. - pub fn len(&self) -> usize { - self.extrinsics.len() - } - - /// Are there no extrinsics in this block? - // Note: mainly here to satisfy clippy. - pub fn is_empty(&self) -> bool { - self.extrinsics.is_empty() - } - - /// Returns an iterator over the extrinsics in the block body. - // Dev note: The returned iterator is 'static + Send so that we can box it up and make - // use of it with our `FilterExtrinsic` stuff. - pub fn iter(&self) -> impl Iterator> + Send + Sync + 'static { - let extrinsics = self.extrinsics.clone(); - let num_extrinsics = self.extrinsics.len(); - let hasher = self.hasher; - let metadata = self.metadata.clone(); - - (0..num_extrinsics).map(move |index| { - ExtrinsicDetails::new( - index as u32, - extrinsics[index].clone(), - hasher, - metadata.clone(), - ) - }) - } - - /// Iterate through the extrinsics using metadata to dynamically decode and skip - /// them, and return only those which should decode to the provided `E` type. - /// If an error occurs, all subsequent iterations return `None`. 
- pub fn find( - &self, - ) -> impl Iterator, ExtrinsicError>> { - self.iter().filter_map(|details| { - match details.as_extrinsic::() { - // Failed to decode extrinsic: - Err(err) => Some(Err(err)), - // Extrinsic for a different pallet / different call (skip): - Ok(None) => None, - Ok(Some(value)) => Some(Ok(FoundExtrinsic { details, value })), - } - }) - } - - /// Iterate through the extrinsics using metadata to dynamically decode and skip - /// them, and return the first extrinsic found which decodes to the provided `E` type. - pub fn find_first( - &self, - ) -> Result>, ExtrinsicError> { - self.find::().next().transpose() - } - - /// Iterate through the extrinsics using metadata to dynamically decode and skip - /// them, and return the last extrinsic found which decodes to the provided `Ev` type. - pub fn find_last( - &self, - ) -> Result>, ExtrinsicError> { - self.find::().last().transpose() - } - - /// Find an extrinsics that decodes to the type provided. Returns true if it was found. - pub fn has(&self) -> Result { - Ok(self.find::().next().transpose()?.is_some()) - } -} - -/// A single extrinsic in a block. -pub struct ExtrinsicDetails { - /// The index of the extrinsic in the block. - index: u32, - /// Extrinsic bytes and decode info. - ext: Arc<(Extrinsic<'static, u32>, Vec)>, - /// Hash the extrinsic if we want. - hasher: T::Hasher, - /// Subxt metadata to fetch the extrinsic metadata. - metadata: Metadata, - _marker: core::marker::PhantomData, -} - -impl ExtrinsicDetails -where - T: Config, -{ - // Attempt to dynamically decode a single extrinsic from the given input. - #[doc(hidden)] - pub fn new( - index: u32, - ext: Arc<(Extrinsic<'static, u32>, Vec)>, - hasher: T::Hasher, - metadata: Metadata, - ) -> ExtrinsicDetails { - ExtrinsicDetails { - index, - ext, - hasher, - metadata, - _marker: core::marker::PhantomData, - } - } - - /// Calculate and return the hash of the extrinsic, based on the configured hasher. 
- pub fn hash(&self) -> HashFor { - // Use hash(), not hash_of(), because we don't want to double encode the bytes. - self.hasher.hash(self.bytes()) - } - - /// Is the extrinsic signed? - pub fn is_signed(&self) -> bool { - self.decoded_info().is_signed() - } - - /// The index of the extrinsic in the block. - pub fn index(&self) -> u32 { - self.index - } - - /// Return _all_ of the bytes representing this extrinsic, which include, in order: - /// - First byte: abbbbbbb (a = 0 for unsigned, 1 for signed, b = version) - /// - SignatureType (if the payload is signed) - /// - Address - /// - Signature - /// - Extra fields - /// - Extrinsic call bytes - pub fn bytes(&self) -> &[u8] { - &self.ext.1 - } - - /// Return only the bytes representing this extrinsic call: - /// - First byte is the pallet index - /// - Second byte is the variant (call) index - /// - Followed by field bytes. - /// - /// # Note - /// - /// Please use [`Self::bytes`] if you want to get all extrinsic bytes. - pub fn call_bytes(&self) -> &[u8] { - &self.bytes()[self.decoded_info().call_data_range()] - } - - /// Return the bytes representing the fields stored in this extrinsic. - /// - /// # Note - /// - /// This is a subset of [`Self::call_bytes`] that does not include the - /// first two bytes that denote the pallet index and the variant index. - pub fn field_bytes(&self) -> &[u8] { - // Note: this cannot panic because we checked the extrinsic bytes - // to contain at least two bytes. - &self.bytes()[self.decoded_info().call_data_args_range()] - } - - /// Return only the bytes of the address that signed this extrinsic. - /// - /// # Note - /// - /// Returns `None` if the extrinsic is not signed. - pub fn address_bytes(&self) -> Option<&[u8]> { - self.decoded_info() - .signature_payload() - .map(|s| &self.bytes()[s.address_range()]) - } - - /// Returns Some(signature_bytes) if the extrinsic was signed otherwise None is returned. 
- pub fn signature_bytes(&self) -> Option<&[u8]> { - self.decoded_info() - .signature_payload() - .map(|s| &self.bytes()[s.signature_range()]) - } - - /// Returns the signed extension `extra` bytes of the extrinsic. - /// Each signed extension has an `extra` type (May be zero-sized). - /// These bytes are the scale encoded `extra` fields of each signed extension in order of the signed extensions. - /// They do *not* include the `additional` signed bytes that are used as part of the payload that is signed. - /// - /// Note: Returns `None` if the extrinsic is not signed. - pub fn transaction_extensions_bytes(&self) -> Option<&[u8]> { - self.decoded_info() - .transaction_extension_payload() - .map(|t| &self.bytes()[t.range()]) - } - - /// Returns `None` if the extrinsic is not signed. - pub fn transaction_extensions(&self) -> Option> { - self.decoded_info() - .transaction_extension_payload() - .map(|t| ExtrinsicTransactionExtensions::new(self.bytes(), &self.metadata, t)) - } - - /// The index of the pallet that the extrinsic originated from. - pub fn pallet_index(&self) -> u8 { - self.decoded_info().pallet_index() - } - - /// The index of the extrinsic variant that the extrinsic originated from. - pub fn call_index(&self) -> u8 { - self.decoded_info().call_index() - } - - /// The name of the pallet from whence the extrinsic originated. - pub fn pallet_name(&self) -> &str { - self.decoded_info().pallet_name() - } - - /// The name of the call (ie the name of the variant that it corresponds to). - pub fn call_name(&self) -> &str { - self.decoded_info().call_name() - } - - /// Decode and provide the extrinsic fields back in the form of a [`scale_value::Composite`] - /// type which represents the named or unnamed fields that were present in the extrinsic. 
- pub fn decode_as_fields(&self) -> Result { - let bytes = &mut self.field_bytes(); - let mut fields = self.decoded_info().call_data().map(|d| { - let name = if d.name().is_empty() { - None - } else { - Some(d.name()) - }; - scale_decode::Field::new(*d.ty(), name) - }); - let decoded = - E::decode_as_fields(bytes, &mut fields, self.metadata.types()).map_err(|e| { - ExtrinsicError::CannotDecodeFields { - extrinsic_index: self.index as usize, - error: e, - } - })?; - - Ok(decoded) - } - - /// Attempt to decode these [`ExtrinsicDetails`] into a type representing the extrinsic fields. - /// Such types are exposed in the codegen as `pallet_name::calls::types::CallName` types. - pub fn as_extrinsic(&self) -> Result, ExtrinsicError> { - if self.decoded_info().pallet_name() == E::PALLET - && self.decoded_info().call_name() == E::CALL - { - let mut fields = self.decoded_info().call_data().map(|d| { - let name = if d.name().is_empty() { - None - } else { - Some(d.name()) - }; - scale_decode::Field::new(*d.ty(), name) - }); - let decoded = - E::decode_as_fields(&mut self.field_bytes(), &mut fields, self.metadata.types()) - .map_err(|e| ExtrinsicError::CannotDecodeFields { - extrinsic_index: self.index as usize, - error: e, - })?; - Ok(Some(decoded)) - } else { - Ok(None) - } - } - - /// Attempt to decode these [`ExtrinsicDetails`] into an outer call enum type (which includes - /// the pallet and extrinsic enum variants as well as the extrinsic fields). A compatible - /// type for this is exposed via static codegen as a root level `Call` type. 
- pub fn as_root_extrinsic(&self) -> Result { - let decoded = E::decode_as_type( - &mut &self.call_bytes()[..], - self.metadata.outer_enums().call_enum_ty(), - self.metadata.types(), - ) - .map_err(|e| ExtrinsicError::CannotDecodeIntoRootExtrinsic { - extrinsic_index: self.index as usize, - error: e, - })?; - - Ok(decoded) - } - - fn decoded_info(&self) -> &Extrinsic<'static, u32> { - &self.ext.0 - } -} - -/// A Static Extrinsic found in a block coupled with it's details. -pub struct FoundExtrinsic { - /// Details for the extrinsic. - pub details: ExtrinsicDetails, - /// The decoded extrinsic value. - pub value: E, -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::config::SubstrateConfig; - use assert_matches::assert_matches; - use codec::{Decode, Encode}; - use frame_metadata::v15::{CustomMetadata, OuterEnums}; - use frame_metadata::{ - RuntimeMetadataPrefixed, - v15::{ExtrinsicMetadata, PalletCallMetadata, PalletMetadata, RuntimeMetadataV15}, - }; - use scale_info::{TypeInfo, meta_type}; - use scale_value::Value; - - // Extrinsic needs to contain at least the generic type parameter "Call" - // for the metadata to be valid. - // The "Call" type from the metadata is used to decode extrinsics. - #[allow(unused)] - #[derive(TypeInfo)] - struct ExtrinsicType { - pub signature: Option<(Address, Signature, Extra)>, - pub function: Call, - } - - // Because this type is used to decode extrinsics, we expect this to be a TypeDefVariant. - // Each pallet must contain one single variant. - #[allow(unused)] - #[derive( - Encode, - Decode, - TypeInfo, - Clone, - Debug, - PartialEq, - Eq, - scale_encode::EncodeAsType, - scale_decode::DecodeAsType, - )] - enum RuntimeCall { - Test(Pallet), - } - - // The calls of the pallet. 
- #[allow(unused)] - #[derive( - Encode, - Decode, - TypeInfo, - Clone, - Debug, - PartialEq, - Eq, - scale_encode::EncodeAsType, - scale_decode::DecodeAsType, - )] - enum Pallet { - #[allow(unused)] - #[codec(index = 2)] - TestCall { - value: u128, - signed: bool, - name: String, - }, - } - - #[allow(unused)] - #[derive( - Encode, - Decode, - TypeInfo, - Clone, - Debug, - PartialEq, - Eq, - scale_encode::EncodeAsType, - scale_decode::DecodeAsType, - )] - struct TestCallExtrinsic { - value: u128, - signed: bool, - name: String, - } - - impl StaticExtrinsic for TestCallExtrinsic { - const PALLET: &'static str = "Test"; - const CALL: &'static str = "TestCall"; - } - - /// Build fake metadata consisting the types needed to represent an extrinsic. - fn metadata() -> Metadata { - let pallets = vec![PalletMetadata { - name: "Test", - storage: None, - calls: Some(PalletCallMetadata { - ty: meta_type::(), - }), - event: None, - constants: vec![], - error: None, - index: 0, - docs: vec![], - }]; - - let extrinsic = ExtrinsicMetadata { - version: 4, - signed_extensions: vec![], - address_ty: meta_type::<()>(), - call_ty: meta_type::(), - signature_ty: meta_type::<()>(), - extra_ty: meta_type::<()>(), - }; - - let meta = RuntimeMetadataV15::new( - pallets, - extrinsic, - meta_type::<()>(), - vec![], - OuterEnums { - call_enum_ty: meta_type::(), - event_enum_ty: meta_type::<()>(), - error_enum_ty: meta_type::<()>(), - }, - CustomMetadata { - map: Default::default(), - }, - ); - let runtime_metadata: RuntimeMetadataPrefixed = meta.into(); - let metadata: subxt_metadata::Metadata = runtime_metadata.try_into().unwrap(); - - metadata - } - - #[test] - fn extrinsic_metadata_consistency() { - let metadata = metadata(); - - // Except our metadata to contain the registered types. 
- let pallet = metadata.pallet_by_call_index(0).expect("pallet exists"); - let extrinsic = pallet - .call_variant_by_index(2) - .expect("metadata contains the RuntimeCall enum with this pallet"); - - assert_eq!(pallet.name(), "Test"); - assert_eq!(&extrinsic.name, "TestCall"); - } - - #[test] - fn insufficient_extrinsic_bytes() { - let metadata = metadata(); - - // Decode with empty bytes. - let result = Extrinsics::::decode_from(vec![vec![]], metadata); - assert_matches!( - result.err(), - Some(crate::error::ExtrinsicDecodeErrorAt { - extrinsic_index: 0, - error: _ - }) - ); - } - - #[test] - fn unsupported_version_extrinsic() { - use frame_decode::extrinsics::ExtrinsicDecodeError; - - let metadata = metadata(); - - // Decode with invalid version. - let result = Extrinsics::::decode_from(vec![vec![3u8].encode()], metadata); - - assert_matches!( - result.err(), - Some(crate::error::ExtrinsicDecodeErrorAt { - extrinsic_index: 0, - error: ExtrinsicDecodeErrorAtReason::DecodeError( - ExtrinsicDecodeError::VersionNotSupported(3) - ), - }) - ); - } - - #[test] - fn tx_hashes_line_up() { - let metadata = metadata(); - let hasher = ::Hasher::new(&metadata); - - let tx = crate::dynamic::tx( - "Test", - "TestCall", - vec![ - Value::u128(10), - Value::bool(true), - Value::string("SomeValue"), - ], - ); - - // Encoded TX ready to submit. - let tx_encoded = crate::tx::create_v4_unsigned::(&tx, &metadata) - .expect("Valid dynamic parameters are provided"); - - // Extrinsic details ready to decode. - let extrinsics = Extrinsics::::decode_from( - vec![tx_encoded.encoded().to_owned()], - metadata, - ) - .expect("Valid extrinsic"); - - let extrinsic = extrinsics.iter().next().unwrap(); - - // Both of these types should produce the same bytes. - assert_eq!(tx_encoded.encoded(), extrinsic.bytes(), "bytes should eq"); - // Both of these types should produce the same hash. 
- assert_eq!( - tx_encoded.hash_with(hasher), - extrinsic.hash(), - "hashes should eq" - ); - } - - #[test] - fn statically_decode_extrinsic() { - let metadata = metadata(); - - let tx = crate::dynamic::tx( - "Test", - "TestCall", - vec![ - Value::u128(10), - Value::bool(true), - Value::string("SomeValue"), - ], - ); - let tx_encoded = crate::tx::create_v4_unsigned::(&tx, &metadata) - .expect("Valid dynamic parameters are provided"); - - // Note: `create_unsigned` produces the extrinsic bytes by prefixing the extrinsic length. - // The length is handled deserializing `ChainBlockExtrinsic`, therefore the first byte is not needed. - let extrinsics = Extrinsics::::decode_from( - vec![tx_encoded.encoded().to_owned()], - metadata, - ) - .expect("Valid extrinsic"); - - let extrinsic = extrinsics.iter().next().unwrap(); - - assert!(!extrinsic.is_signed()); - - assert_eq!(extrinsic.index(), 0); - - assert_eq!(extrinsic.pallet_index(), 0); - assert_eq!(extrinsic.pallet_name(), "Test"); - - assert_eq!(extrinsic.call_index(), 2); - assert_eq!(extrinsic.call_name(), "TestCall"); - - // Decode the extrinsic to the root enum. - let decoded_extrinsic = extrinsic - .as_root_extrinsic::() - .expect("can decode extrinsic to root enum"); - - assert_eq!( - decoded_extrinsic, - RuntimeCall::Test(Pallet::TestCall { - value: 10, - signed: true, - name: "SomeValue".into(), - }) - ); - - // Decode the extrinsic to the extrinsic variant. - let decoded_extrinsic = extrinsic - .as_extrinsic::() - .expect("can decode extrinsic to extrinsic variant") - .expect("value cannot be None"); - - assert_eq!( - decoded_extrinsic, - TestCallExtrinsic { - value: 10, - signed: true, - name: "SomeValue".into(), - } - ); - } -} diff --git a/core/src/blocks/mod.rs b/core/src/blocks/mod.rs deleted file mode 100644 index 7ac90ff114..0000000000 --- a/core/src/blocks/mod.rs +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. 
-// see LICENSE for license details. - -//! Decode and iterate over the extrinsics in block bodies. -//! -//! Use the [`decode_from`] function as an entry point to decoding extrinsics, and then -//! have a look at [`Extrinsics`] and [`ExtrinsicDetails`] to see which methods are available -//! to work with the extrinsics. -//! -//! # Example -//! -//! ```rust -//! extern crate alloc; -//! -//! use subxt_macro::subxt; -//! use subxt_core::blocks; -//! use subxt_core::Metadata; -//! use subxt_core::config::PolkadotConfig; -//! use alloc::vec; -//! -//! // If we generate types without `subxt`, we need to point to `::subxt_core`: -//! #[subxt( -//! crate = "::subxt_core", -//! runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale", -//! )] -//! pub mod polkadot {} -//! -//! // Some metadata we'd like to use to help us decode extrinsics: -//! let metadata_bytes = include_bytes!("../../../artifacts/polkadot_metadata_small.scale"); -//! let metadata = Metadata::decode_from(&metadata_bytes[..]).unwrap(); -//! -//! // Some extrinsics we'd like to decode: -//! let ext_bytes = vec![ -//! hex::decode("1004020000").unwrap(), -//! hex::decode("c10184001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c01a27c400241aeafdea1871b32f1f01e92acd272ddfe6b2f8b73b64c606572a530c470a94ef654f7baa5828474754a1fe31b59f91f6bb5c2cd5a07c22d4b8b8387350100000000001448656c6c6f").unwrap(), -//! hex::decode("550284001cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c0144bb92734447c893ab16d520fae0d455257550efa28ee66bf6dc942cb8b00d5d2799b98bc2865d21812278a9a266acd7352f40742ff11a6ce1f400013961598485010000000400008eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a481700505a4f7e9f4eb106").unwrap() -//! ]; -//! -//! // Given some chain config and metadata, we know how to decode the bytes. -//! let exts = blocks::decode_from::(ext_bytes, metadata).unwrap(); -//! -//! // We'll see 3 extrinsics: -//! assert_eq!(exts.len(), 3); -//! -//! 
// We can iterate over them and decode various details out of them. -//! for ext in exts.iter() { -//! println!("Pallet: {}", ext.pallet_name()); -//! println!("Call: {}", ext.call_name()); -//! } -//! -//! # let ext_details: Vec<_> = exts.iter() -//! # .map(|ext| { -//! # let pallet = ext.pallet_name().to_string(); -//! # let call = ext.call_name().to_string(); -//! # (pallet, call) -//! # }) -//! # .collect(); -//! # -//! # assert_eq!(ext_details, vec![ -//! # ("Timestamp".to_owned(), "set".to_owned()), -//! # ("System".to_owned(), "remark".to_owned()), -//! # ("Balances".to_owned(), "transfer_allow_death".to_owned()), -//! # ]); -//! ``` - -mod extrinsic_transaction_extensions; -mod extrinsics; -mod static_extrinsic; - -use crate::Metadata; -use crate::config::Config; -use crate::error::ExtrinsicDecodeErrorAt; -pub use crate::error::ExtrinsicError; -use alloc::vec::Vec; -pub use extrinsic_transaction_extensions::{ - ExtrinsicTransactionExtension, ExtrinsicTransactionExtensions, -}; -pub use extrinsics::{ExtrinsicDetails, Extrinsics, FoundExtrinsic}; -pub use static_extrinsic::StaticExtrinsic; - -/// Instantiate a new [`Extrinsics`] object, given a vector containing each extrinsic hash (in the -/// form of bytes) and some metadata that we'll use to decode them. -/// -/// This is a shortcut for [`Extrinsics::decode_from`]. -pub fn decode_from( - extrinsics: Vec>, - metadata: Metadata, -) -> Result, ExtrinsicDecodeErrorAt> { - Extrinsics::decode_from(extrinsics, metadata) -} diff --git a/core/src/blocks/static_extrinsic.rs b/core/src/blocks/static_extrinsic.rs deleted file mode 100644 index 263977863d..0000000000 --- a/core/src/blocks/static_extrinsic.rs +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use scale_decode::DecodeAsFields; - -/// Trait to uniquely identify the extrinsic's identity from the runtime metadata. 
-/// -/// Generated API structures that represent an extrinsic implement this trait. -/// -/// The trait is utilized to decode emitted extrinsics from a block, via obtaining the -/// form of the `Extrinsic` from the metadata. -pub trait StaticExtrinsic: DecodeAsFields { - /// Pallet name. - const PALLET: &'static str; - /// Call name. - const CALL: &'static str; - - /// Returns true if the given pallet and call names match this extrinsic. - fn is_extrinsic(pallet: &str, call: &str) -> bool { - Self::PALLET == pallet && Self::CALL == call - } -} diff --git a/core/src/client.rs b/core/src/client.rs deleted file mode 100644 index cfc8de4fa1..0000000000 --- a/core/src/client.rs +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! A couple of client types that we use elsewhere. - -use crate::{ - Metadata, - config::{Config, HashFor}, -}; -use derive_where::derive_where; - -/// This provides access to some relevant client state in transaction extensions, -/// and is just a combination of some of the available properties. -#[derive_where(Clone, Debug)] -pub struct ClientState { - /// Genesis hash. - pub genesis_hash: HashFor, - /// Runtime version. - pub runtime_version: RuntimeVersion, - /// Metadata. - pub metadata: Metadata, -} - -/// Runtime version information needed to submit transactions. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct RuntimeVersion { - /// Version of the runtime specification. A full-node will not attempt to use its native - /// runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`, - /// `spec_version` and `authoring_version` are the same between Wasm and native. - pub spec_version: u32, - /// All existing dispatches are fully compatible when this number doesn't change. If this - /// number changes, then `spec_version` must change, also. 
- /// - /// This number must change when an existing dispatchable (module ID, dispatch ID) is changed, - /// either through an alteration in its user-level semantics, a parameter - /// added/removed/changed, a dispatchable being removed, a module being removed, or a - /// dispatchable/module changing its index. - /// - /// It need *not* change when a new module is added or when a dispatchable is added. - pub transaction_version: u32, -} diff --git a/core/src/config/default_extrinsic_params.rs b/core/src/config/default_extrinsic_params.rs deleted file mode 100644 index 28cef1ec32..0000000000 --- a/core/src/config/default_extrinsic_params.rs +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use crate::config::transaction_extensions::CheckMortalityParams; - -use super::{Config, HashFor}; -use super::{ExtrinsicParams, transaction_extensions}; - -/// The default [`super::ExtrinsicParams`] implementation understands common signed extensions -/// and how to apply them to a given chain. -pub type DefaultExtrinsicParams = transaction_extensions::AnyOf< - T, - ( - transaction_extensions::VerifySignature, - transaction_extensions::CheckSpecVersion, - transaction_extensions::CheckTxVersion, - transaction_extensions::CheckNonce, - transaction_extensions::CheckGenesis, - transaction_extensions::CheckMortality, - transaction_extensions::ChargeAssetTxPayment, - transaction_extensions::ChargeTransactionPayment, - transaction_extensions::CheckMetadataHash, - ), ->; - -/// A builder that outputs the set of [`super::ExtrinsicParams::Params`] required for -/// [`DefaultExtrinsicParams`]. This may expose methods that aren't applicable to the current -/// chain; such values will simply be ignored if so. -pub struct DefaultExtrinsicParamsBuilder { - /// `None` means the tx will be immortal, else it's mortality is described. 
- mortality: transaction_extensions::CheckMortalityParams, - /// `None` means the nonce will be automatically set. - nonce: Option, - /// `None` means we'll use the native token. - tip_of_asset_id: Option, - tip: u128, - tip_of: u128, -} - -impl Default for DefaultExtrinsicParamsBuilder { - fn default() -> Self { - Self { - mortality: CheckMortalityParams::default(), - tip: 0, - tip_of: 0, - tip_of_asset_id: None, - nonce: None, - } - } -} - -impl DefaultExtrinsicParamsBuilder { - /// Configure new extrinsic params. We default to providing no tip - /// and using an immortal transaction unless otherwise configured - pub fn new() -> Self { - Default::default() - } - - /// Make the transaction immortal, meaning it will never expire. This means that it could, in - /// theory, be pending for a long time and only be included many blocks into the future. - pub fn immortal(mut self) -> Self { - self.mortality = transaction_extensions::CheckMortalityParams::immortal(); - self - } - - /// Make the transaction mortal, given a number of blocks it will be mortal for from - /// the current block at the time of submission. - /// - /// # Warning - /// - /// This will ultimately return an error if used for creating extrinsic offline, because we need - /// additional information in order to set the mortality properly. - /// - /// When creating offline transactions, you must use [`Self::mortal_from_unchecked`] instead to set - /// the mortality. This provides all of the necessary information which we must otherwise be online - /// in order to obtain. - pub fn mortal(mut self, for_n_blocks: u64) -> Self { - self.mortality = transaction_extensions::CheckMortalityParams::mortal(for_n_blocks); - self - } - - /// Configure a transaction that will be mortal for the number of blocks given, and from the - /// block details provided. Prefer to use [`Self::mortal()`] where possible, which prevents - /// the block number and hash from being misaligned. 
- pub fn mortal_from_unchecked( - mut self, - for_n_blocks: u64, - from_block_n: u64, - from_block_hash: HashFor, - ) -> Self { - self.mortality = transaction_extensions::CheckMortalityParams::mortal_from_unchecked( - for_n_blocks, - from_block_n, - from_block_hash, - ); - self - } - - /// Provide a specific nonce for the submitter of the extrinsic - pub fn nonce(mut self, nonce: u64) -> Self { - self.nonce = Some(nonce); - self - } - - /// Provide a tip to the block author in the chain's native token. - pub fn tip(mut self, tip: u128) -> Self { - self.tip = tip; - self.tip_of = tip; - self.tip_of_asset_id = None; - self - } - - /// Provide a tip to the block author using the token denominated by the `asset_id` provided. This - /// is not applicable on chains which don't use the `ChargeAssetTxPayment` signed extension; in this - /// case, no tip will be given. - pub fn tip_of(mut self, tip: u128, asset_id: T::AssetId) -> Self { - self.tip = 0; - self.tip_of = tip; - self.tip_of_asset_id = Some(asset_id); - self - } - - /// Build the extrinsic parameters. 
- pub fn build(self) -> as ExtrinsicParams>::Params { - let check_mortality_params = self.mortality; - - let charge_asset_tx_params = if let Some(asset_id) = self.tip_of_asset_id { - transaction_extensions::ChargeAssetTxPaymentParams::tip_of(self.tip, asset_id) - } else { - transaction_extensions::ChargeAssetTxPaymentParams::tip(self.tip) - }; - - let charge_transaction_params = - transaction_extensions::ChargeTransactionPaymentParams::tip(self.tip); - - let check_nonce_params = if let Some(nonce) = self.nonce { - transaction_extensions::CheckNonceParams::with_nonce(nonce) - } else { - transaction_extensions::CheckNonceParams::from_chain() - }; - - ( - (), - (), - (), - check_nonce_params, - (), - check_mortality_params, - charge_asset_tx_params, - charge_transaction_params, - (), - ) - } -} - -#[cfg(test)] -mod test { - use super::*; - - fn assert_default(_t: T) {} - - #[test] - fn params_are_default() { - let params = DefaultExtrinsicParamsBuilder::::new().build(); - assert_default(params) - } -} diff --git a/core/src/config/extrinsic_params.rs b/core/src/config/extrinsic_params.rs deleted file mode 100644 index bd3168631c..0000000000 --- a/core/src/config/extrinsic_params.rs +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! This module contains a trait which controls the parameters that must -//! be provided in order to successfully construct an extrinsic. -//! [`crate::config::DefaultExtrinsicParams`] provides a general-purpose -//! implementation of this that will work in many cases. - -use crate::{ - client::ClientState, - config::{Config, HashFor}, - error::ExtrinsicParamsError, -}; -use alloc::vec::Vec; -use core::any::Any; - -/// This trait allows you to configure the "signed extra" and -/// "additional" parameters that are a part of the transaction payload -/// or the signer payload respectively. 
-pub trait ExtrinsicParams: ExtrinsicParamsEncoder + Sized + Send + 'static { - /// These parameters can be provided to the constructor along with - /// some default parameters that `subxt` understands, in order to - /// help construct your [`ExtrinsicParams`] object. - type Params: Params; - - /// Construct a new instance of our [`ExtrinsicParams`]. - fn new(client: &ClientState, params: Self::Params) -> Result; -} - -/// This trait is expected to be implemented for any [`ExtrinsicParams`], and -/// defines how to encode the "additional" and "extra" params. Both functions -/// are optional and will encode nothing by default. -pub trait ExtrinsicParamsEncoder: 'static { - /// This is expected to SCALE encode the transaction extension data to some - /// buffer that has been provided. This data is attached to the transaction - /// and also (by default) attached to the signer payload which is signed to - /// provide a signature for the transaction. - /// - /// If [`ExtrinsicParamsEncoder::encode_signer_payload_value_to`] is implemented, - /// then that will be used instead when generating a signer payload. Useful for - /// eg the `VerifySignature` extension, which is send with the transaction but - /// is not a part of the signer payload. - fn encode_value_to(&self, _v: &mut Vec) {} - - /// See [`ExtrinsicParamsEncoder::encode_value_to`]. This defaults to calling that - /// method, but if implemented will dictate what is encoded to the signer payload. - fn encode_signer_payload_value_to(&self, v: &mut Vec) { - self.encode_value_to(v); - } - - /// This is expected to SCALE encode the "implicit" (formally "additional") - /// parameters to some buffer that has been provided. These parameters are - /// _not_ sent along with the transaction, but are taken into account when - /// signing it, meaning the client and node must agree on their values. - fn encode_implicit_to(&self, _v: &mut Vec) {} - - /// Set the signature. 
This happens after we have constructed the extrinsic params, - /// and so is defined here rather than on the params, below. We need to use `&dyn Any` - /// to keep this trait object safe, but can downcast in the impls. - /// - /// # Panics - /// - /// Implementations of this will likely try to downcast the provided `account_id` - /// and `signature` into `T::AccountId` and `T::Signature` (where `T: Config`), and are - /// free to panic if this downcasting does not succeed. - /// - /// In typical usage, this is not a problem, since this method is only called internally - /// and provided values which line up with the relevant `Config`. In theory though, this - /// method can be called manually with any types, hence this warning. - fn inject_signature(&mut self, _account_id: &dyn Any, _signature: &dyn Any) {} -} - -/// The parameters (ie [`ExtrinsicParams::Params`]) can also have data injected into them, -/// allowing Subxt to retrieve data from the chain and amend the parameters with it when -/// online. -pub trait Params { - /// Set the account nonce. - fn inject_account_nonce(&mut self, _nonce: u64) {} - /// Set the current block. - fn inject_block(&mut self, _number: u64, _hash: HashFor) {} -} - -impl Params for () {} - -macro_rules! 
impl_tuples { - ($($ident:ident $index:tt),+) => { - impl ),+> Params for ($($ident,)+){ - fn inject_account_nonce(&mut self, nonce: u64) { - $(self.$index.inject_account_nonce(nonce);)+ - } - - fn inject_block(&mut self, number: u64, hash: HashFor) { - $(self.$index.inject_block(number, hash);)+ - } - } - } -} - -#[rustfmt::skip] -const _: () = { - impl_tuples!(A 0); - impl_tuples!(A 0, B 1); - impl_tuples!(A 0, B 1, C 2); - impl_tuples!(A 0, B 1, C 2, D 3); - impl_tuples!(A 0, B 1, C 2, D 3, E 4); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, T 19); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, T 19, U 20); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 
14, P 15, Q 16, R 17, S 18, T 19, U 20, V 21); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, T 19, U 20, V 21, W 22); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, T 19, U 20, V 21, W 22, X 23); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, T 19, U 20, V 21, W 22, X 23, Y 24); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, T 19, U 20, V 21, W 22, X 23, Y 24, Z 25); -}; diff --git a/core/src/config/mod.rs b/core/src/config/mod.rs deleted file mode 100644 index af3d74af23..0000000000 --- a/core/src/config/mod.rs +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! This module provides a [`Config`] type, which is used to define various -//! types that are important in order to speak to a particular chain. -//! [`SubstrateConfig`] provides a default set of these types suitable for the -//! default Substrate node implementation, and [`PolkadotConfig`] for a -//! Polkadot node. 
- -mod default_extrinsic_params; -mod extrinsic_params; - -pub mod polkadot; -pub mod substrate; -pub mod transaction_extensions; - -use codec::{Decode, Encode}; -use core::fmt::Debug; -use scale_decode::DecodeAsType; -use scale_encode::EncodeAsType; -use serde::{Serialize, de::DeserializeOwned}; -use subxt_metadata::Metadata; - -pub use default_extrinsic_params::{DefaultExtrinsicParams, DefaultExtrinsicParamsBuilder}; -pub use extrinsic_params::{ExtrinsicParams, ExtrinsicParamsEncoder}; -pub use polkadot::{PolkadotConfig, PolkadotExtrinsicParams, PolkadotExtrinsicParamsBuilder}; -pub use substrate::{SubstrateConfig, SubstrateExtrinsicParams, SubstrateExtrinsicParamsBuilder}; -pub use transaction_extensions::TransactionExtension; - -/// Runtime types. -// Note: the `Send + Sync + 'static` bound isn't strictly required, but currently deriving -// TypeInfo automatically applies a 'static bound to all generic types (including this one), -// And we want the compiler to infer `Send` and `Sync` OK for things which have `T: Config` -// rather than having to `unsafe impl` them ourselves. -pub trait Config: Sized + Send + Sync + 'static { - /// The account ID type. - type AccountId: Debug + Clone + Encode + Decode + Serialize + Send; - - /// The address type. - type Address: Debug + Encode + From; - - /// The signature type. - type Signature: Debug + Clone + Encode + Decode + Send; - - /// The hashing system (algorithm) being used in the runtime (e.g. Blake2). - type Hasher: Debug + Clone + Copy + Hasher + Send + Sync; - - /// The block header. - type Header: Debug + Header + Sync + Send + DeserializeOwned + Clone; - - /// This type defines the extrinsic extra and additional parameters. - type ExtrinsicParams: ExtrinsicParams; - - /// This is used to identify an asset in the `ChargeAssetTxPayment` signed extension. - type AssetId: Debug + Clone + Encode + DecodeAsType + EncodeAsType + Send; -} - -/// Given some [`Config`], this returns the type of hash used. 
-pub type HashFor = <::Hasher as Hasher>::Output; - -/// given some [`Config`], this return the other params needed for its `ExtrinsicParams`. -pub type ParamsFor = <::ExtrinsicParams as ExtrinsicParams>::Params; - -/// Block hashes must conform to a bunch of things to be used in Subxt. -pub trait Hash: - Debug - + Copy - + Send - + Sync - + Decode - + AsRef<[u8]> - + Serialize - + DeserializeOwned - + Encode - + PartialEq - + Eq - + core::hash::Hash -{ -} -impl Hash for T where - T: Debug - + Copy - + Send - + Sync - + Decode - + AsRef<[u8]> - + Serialize - + DeserializeOwned - + Encode - + PartialEq - + Eq - + core::hash::Hash -{ -} - -/// This represents the hasher used by a node to hash things like block headers -/// and extrinsics. -pub trait Hasher { - /// The type given back from the hash operation - type Output: Hash; - - /// Construct a new hasher. - fn new(metadata: &Metadata) -> Self; - - /// Hash some bytes to the given output type. - fn hash(&self, s: &[u8]) -> Self::Output; - - /// Hash some SCALE encodable type to the given output type. - fn hash_of(&self, s: &S) -> Self::Output { - let out = s.encode(); - self.hash(&out) - } -} - -/// This represents the block header type used by a node. -pub trait Header: Sized + Encode + Decode { - /// The block number type for this header. - type Number: Into; - /// The hasher used to hash this header. - type Hasher: Hasher; - - /// Return the block number of this header. - fn number(&self) -> Self::Number; - - /// Hash this header. - fn hash_with(&self, hasher: Self::Hasher) -> ::Output { - hasher.hash_of(self) - } -} diff --git a/core/src/config/polkadot.rs b/core/src/config/polkadot.rs deleted file mode 100644 index 1996d9b756..0000000000 --- a/core/src/config/polkadot.rs +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! 
Polkadot specific configuration - -use super::{Config, DefaultExtrinsicParams, DefaultExtrinsicParamsBuilder}; - -use crate::config::SubstrateConfig; -pub use crate::utils::{AccountId32, MultiAddress, MultiSignature}; -pub use primitive_types::{H256, U256}; - -/// Default set of commonly used types by Polkadot nodes. -// Note: The trait implementations exist just to make life easier, -// but shouldn't strictly be necessary since users can't instantiate this type. -#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)] -pub enum PolkadotConfig {} - -impl Config for PolkadotConfig { - type AccountId = ::AccountId; - type Signature = ::Signature; - type Hasher = ::Hasher; - type Header = ::Header; - type AssetId = ::AssetId; - - // Address on Polkadot has no account index, whereas it's u32 on - // the default substrate dev node. - type Address = MultiAddress; - - // These are the same as the default substrate node, but redefined - // because we need to pass the PolkadotConfig trait as a param. - type ExtrinsicParams = PolkadotExtrinsicParams; -} - -/// A struct representing the signed extra and additional parameters required -/// to construct a transaction for a polkadot node. -pub type PolkadotExtrinsicParams = DefaultExtrinsicParams; - -/// A builder which leads to [`PolkadotExtrinsicParams`] being constructed. -/// This is what you provide to methods like `sign_and_submit()`. -pub type PolkadotExtrinsicParamsBuilder = DefaultExtrinsicParamsBuilder; diff --git a/core/src/config/substrate.rs b/core/src/config/substrate.rs deleted file mode 100644 index 4695e97303..0000000000 --- a/core/src/config/substrate.rs +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! 
Substrate specific configuration - -use super::{Config, DefaultExtrinsicParams, DefaultExtrinsicParamsBuilder, Hasher, Header}; -pub use crate::utils::{AccountId32, MultiAddress, MultiSignature}; -use alloc::format; -use alloc::vec::Vec; -use codec::{Decode, Encode}; -pub use primitive_types::{H256, U256}; -use serde::{Deserialize, Serialize}; -use subxt_metadata::Metadata; - -/// Default set of commonly used types by Substrate runtimes. -// Note: We only use this at the type level, so it should be impossible to -// create an instance of it. -// The trait implementations exist just to make life easier, -// but shouldn't strictly be necessary since users can't instantiate this type. -#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)] -pub enum SubstrateConfig {} - -impl Config for SubstrateConfig { - type AccountId = AccountId32; - type Address = MultiAddress; - type Signature = MultiSignature; - type Hasher = DynamicHasher256; - type Header = SubstrateHeader; - type ExtrinsicParams = SubstrateExtrinsicParams; - type AssetId = u32; -} - -/// A struct representing the signed extra and additional parameters required -/// to construct a transaction for the default substrate node. -pub type SubstrateExtrinsicParams = DefaultExtrinsicParams; - -/// A builder which leads to [`SubstrateExtrinsicParams`] being constructed. -/// This is what you provide to methods like `sign_and_submit()`. -pub type SubstrateExtrinsicParamsBuilder = DefaultExtrinsicParamsBuilder; - -/// A hasher (ie implements [`Hasher`]) which hashes values using the blaks2_256 algorithm. 
-#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct BlakeTwo256; - -impl Hasher for BlakeTwo256 { - type Output = H256; - - fn new(_metadata: &Metadata) -> Self { - Self - } - - fn hash(&self, s: &[u8]) -> Self::Output { - sp_crypto_hashing::blake2_256(s).into() - } -} - -/// A hasher (ie implements [`Hasher`]) which inspects the runtime metadata to decide how to -/// hash types, falling back to blake2_256 if the hasher information is not available. -/// -/// Currently this hasher supports only `BlakeTwo256` and `Keccak256` hashing methods. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct DynamicHasher256(HashType); - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -enum HashType { - // Most chains use this: - BlakeTwo256, - // Chains like Hyperbridge use this (tends to be eth compatible chains) - Keccak256, - // If we don't have V16 metadata, we'll emit this and default to BlakeTwo256. - Unknown, -} - -impl Hasher for DynamicHasher256 { - type Output = H256; - - fn new(metadata: &Metadata) -> Self { - // Determine the Hash associated type used for the current chain, if possible. - let Some(system_pallet) = metadata.pallet_by_name("System") else { - return Self(HashType::Unknown); - }; - let Some(hash_ty_id) = system_pallet.associated_type_id("Hashing") else { - return Self(HashType::Unknown); - }; - - let ty = metadata - .types() - .resolve(hash_ty_id) - .expect("Type information for 'Hashing' associated type should be in metadata"); - - let hash_type = match ty.path.ident().as_deref().unwrap_or("") { - "BlakeTwo256" => HashType::BlakeTwo256, - "Keccak256" => HashType::Keccak256, - _ => HashType::Unknown, - }; - - Self(hash_type) - } - - fn hash(&self, s: &[u8]) -> Self::Output { - match self.0 { - HashType::BlakeTwo256 | HashType::Unknown => sp_crypto_hashing::blake2_256(s).into(), - HashType::Keccak256 => sp_crypto_hashing::keccak_256(s).into(), - } - } -} - -/// A generic Substrate header type, adapted from `sp_runtime::generic::Header`. 
-/// The block number and hasher can be configured to adapt this for other nodes. -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct SubstrateHeader + TryFrom, H: Hasher> { - /// The parent hash. - pub parent_hash: H::Output, - /// The block number. - #[serde( - serialize_with = "serialize_number", - deserialize_with = "deserialize_number" - )] - #[codec(compact)] - pub number: N, - /// The state trie merkle root - pub state_root: H::Output, - /// The merkle root of the extrinsics. - pub extrinsics_root: H::Output, - /// A chain-specific digest of data useful for light clients or referencing auxiliary data. - pub digest: Digest, -} - -impl Header for SubstrateHeader -where - N: Copy + Into + Into + TryFrom + Encode, - H: Hasher, - SubstrateHeader: Encode + Decode, -{ - type Number = N; - type Hasher = H; - - fn number(&self) -> Self::Number { - self.number - } -} - -/// Generic header digest. From `sp_runtime::generic::digest`. -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Default)] -pub struct Digest { - /// A list of digest items. - pub logs: Vec, -} - -/// Digest item that is able to encode/decode 'system' digest items and -/// provide opaque access to other items. From `sp_runtime::generic::digest`. -#[derive(Debug, PartialEq, Eq, Clone)] -pub enum DigestItem { - /// A pre-runtime digest. - /// - /// These are messages from the consensus engine to the runtime, although - /// the consensus engine can (and should) read them itself to avoid - /// code and state duplication. It is erroneous for a runtime to produce - /// these, but this is not (yet) checked. - /// - /// NOTE: the runtime is not allowed to panic or fail in an `on_initialize` - /// call if an expected `PreRuntime` digest is not present. It is the - /// responsibility of a external block verifier to check this. 
Runtime API calls - /// will initialize the block without pre-runtime digests, so initialization - /// cannot fail when they are missing. - PreRuntime(ConsensusEngineId, Vec), - - /// A message from the runtime to the consensus engine. This should *never* - /// be generated by the native code of any consensus engine, but this is not - /// checked (yet). - Consensus(ConsensusEngineId, Vec), - - /// Put a Seal on it. This is only used by native code, and is never seen - /// by runtimes. - Seal(ConsensusEngineId, Vec), - - /// Some other thing. Unsupported and experimental. - Other(Vec), - - /// An indication for the light clients that the runtime execution - /// environment is updated. - /// - /// Currently this is triggered when: - /// 1. Runtime code blob is changed or - /// 2. `heap_pages` value is changed. - RuntimeEnvironmentUpdated, -} - -// From sp_runtime::generic, DigestItem enum indexes are encoded using this: -#[repr(u32)] -#[derive(Encode, Decode)] -enum DigestItemType { - Other = 0u32, - Consensus = 4u32, - Seal = 5u32, - PreRuntime = 6u32, - RuntimeEnvironmentUpdated = 8u32, -} -impl Encode for DigestItem { - fn encode(&self) -> Vec { - let mut v = Vec::new(); - - match self { - Self::Consensus(val, data) => { - DigestItemType::Consensus.encode_to(&mut v); - (val, data).encode_to(&mut v); - } - Self::Seal(val, sig) => { - DigestItemType::Seal.encode_to(&mut v); - (val, sig).encode_to(&mut v); - } - Self::PreRuntime(val, data) => { - DigestItemType::PreRuntime.encode_to(&mut v); - (val, data).encode_to(&mut v); - } - Self::Other(val) => { - DigestItemType::Other.encode_to(&mut v); - val.encode_to(&mut v); - } - Self::RuntimeEnvironmentUpdated => { - DigestItemType::RuntimeEnvironmentUpdated.encode_to(&mut v); - } - } - - v - } -} -impl Decode for DigestItem { - fn decode(input: &mut I) -> Result { - let item_type: DigestItemType = Decode::decode(input)?; - match item_type { - DigestItemType::PreRuntime => { - let vals: (ConsensusEngineId, Vec) = 
Decode::decode(input)?; - Ok(Self::PreRuntime(vals.0, vals.1)) - } - DigestItemType::Consensus => { - let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; - Ok(Self::Consensus(vals.0, vals.1)) - } - DigestItemType::Seal => { - let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; - Ok(Self::Seal(vals.0, vals.1)) - } - DigestItemType::Other => Ok(Self::Other(Decode::decode(input)?)), - DigestItemType::RuntimeEnvironmentUpdated => Ok(Self::RuntimeEnvironmentUpdated), - } - } -} - -/// Consensus engine unique ID. From `sp_runtime::ConsensusEngineId`. -pub type ConsensusEngineId = [u8; 4]; - -impl serde::Serialize for DigestItem { - fn serialize(&self, seq: S) -> Result - where - S: serde::Serializer, - { - self.using_encoded(|bytes| impl_serde::serialize::serialize(bytes, seq)) - } -} - -impl<'a> serde::Deserialize<'a> for DigestItem { - fn deserialize(de: D) -> Result - where - D: serde::Deserializer<'a>, - { - let r = impl_serde::serialize::deserialize(de)?; - Decode::decode(&mut &r[..]) - .map_err(|e| serde::de::Error::custom(format!("Decode error: {e}"))) - } -} - -fn serialize_number>(val: &T, s: S) -> Result -where - S: serde::Serializer, -{ - let u256: U256 = (*val).into(); - serde::Serialize::serialize(&u256, s) -} - -fn deserialize_number<'a, D, T: TryFrom>(d: D) -> Result -where - D: serde::Deserializer<'a>, -{ - // At the time of writing, Smoldot gives back block numbers in numeric rather - // than hex format. So let's support deserializing from both here: - let number_or_hex = NumberOrHex::deserialize(d)?; - let u256 = number_or_hex.into_u256(); - TryFrom::try_from(u256).map_err(|_| serde::de::Error::custom("Try from failed")) -} - -/// A number type that can be serialized both as a number or a string that encodes a number in a -/// string. -/// -/// We allow two representations of the block number as input. Either we deserialize to the type -/// that is specified in the block type or we attempt to parse given hex value. 
-/// -/// The primary motivation for having this type is to avoid overflows when using big integers in -/// JavaScript (which we consider as an important RPC API consumer). -#[derive(Copy, Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] -#[serde(untagged)] -pub enum NumberOrHex { - /// The number represented directly. - Number(u64), - /// Hex representation of the number. - Hex(U256), -} - -impl NumberOrHex { - /// Converts this number into an U256. - pub fn into_u256(self) -> U256 { - match self { - NumberOrHex::Number(n) => n.into(), - NumberOrHex::Hex(h) => h, - } - } -} - -impl From for U256 { - fn from(num_or_hex: NumberOrHex) -> U256 { - num_or_hex.into_u256() - } -} - -macro_rules! into_number_or_hex { - ($($t: ty)+) => { - $( - impl From<$t> for NumberOrHex { - fn from(x: $t) -> Self { - NumberOrHex::Number(x.into()) - } - } - )+ - } -} -into_number_or_hex!(u8 u16 u32 u64); - -impl From for NumberOrHex { - fn from(n: u128) -> Self { - NumberOrHex::Hex(n.into()) - } -} - -impl From for NumberOrHex { - fn from(n: U256) -> Self { - NumberOrHex::Hex(n) - } -} - -#[cfg(test)] -mod test { - use super::*; - - // Smoldot returns numeric block numbers in the header at the time of writing; - // ensure we can deserialize them properly. - #[test] - fn can_deserialize_numeric_block_number() { - let numeric_block_number_json = r#" - { - "digest": { - "logs": [] - }, - "extrinsicsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", - "number": 4, - "parentHash": "0xcb2690b2c85ceab55be03fc7f7f5f3857e7efeb7a020600ebd4331e10be2f7a5", - "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000" - } - "#; - - let header: SubstrateHeader = - serde_json::from_str(numeric_block_number_json).expect("valid block header"); - assert_eq!(header.number(), 4); - } - - // Substrate returns hex block numbers; ensure we can also deserialize those OK. 
- #[test] - fn can_deserialize_hex_block_number() { - let numeric_block_number_json = r#" - { - "digest": { - "logs": [] - }, - "extrinsicsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", - "number": "0x04", - "parentHash": "0xcb2690b2c85ceab55be03fc7f7f5f3857e7efeb7a020600ebd4331e10be2f7a5", - "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000" - } - "#; - - let header: SubstrateHeader = - serde_json::from_str(numeric_block_number_json).expect("valid block header"); - assert_eq!(header.number(), 4); - } -} diff --git a/core/src/config/transaction_extensions.rs b/core/src/config/transaction_extensions.rs deleted file mode 100644 index e59986c601..0000000000 --- a/core/src/config/transaction_extensions.rs +++ /dev/null @@ -1,707 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! This module contains implementations for common transaction extensions, each -//! of which implements [`TransactionExtension`], and can be used in conjunction with -//! [`AnyOf`] to configure the set of transaction extensions which are known about -//! when interacting with a chain. - -use super::extrinsic_params::ExtrinsicParams; -use crate::client::ClientState; -use crate::config::ExtrinsicParamsEncoder; -use crate::config::{Config, HashFor}; -use crate::error::ExtrinsicParamsError; -use crate::utils::{Era, Static}; -use alloc::borrow::ToOwned; -use alloc::boxed::Box; -use alloc::vec::Vec; -use codec::{Compact, Encode}; -use core::any::Any; -use core::fmt::Debug; -use derive_where::derive_where; -use hashbrown::HashMap; -use scale_decode::DecodeAsType; -use scale_info::PortableRegistry; - -// Re-export this here; it's a bit generically named to be re-exported from ::config. 
-pub use super::extrinsic_params::Params; - -/// A single [`TransactionExtension`] has a unique name, but is otherwise the -/// same as [`ExtrinsicParams`] in describing how to encode the extra and -/// additional data. -pub trait TransactionExtension: ExtrinsicParams { - /// The type representing the `extra` / value bytes of a transaction extension. - /// Decoding from this type should be symmetrical to the respective - /// `ExtrinsicParamsEncoder::encode_value_to()` implementation of this transaction extension. - type Decoded: DecodeAsType; - - /// This should return true if the transaction extension matches the details given. - /// Often, this will involve just checking that the identifier given matches that of the - /// extension in question. - fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool; -} - -/// The [`VerifySignature`] extension. For V5 General transactions, this is how a signature -/// is provided. The signature is constructed by signing a payload which contains the -/// transaction call data as well as the encoded "additional" bytes for any extensions _after_ -/// this one in the list. -pub struct VerifySignature(VerifySignatureDetails); - -impl ExtrinsicParams for VerifySignature { - type Params = (); - - fn new(_client: &ClientState, _params: Self::Params) -> Result { - Ok(VerifySignature(VerifySignatureDetails::Disabled)) - } -} - -impl ExtrinsicParamsEncoder for VerifySignature { - fn encode_value_to(&self, v: &mut Vec) { - self.0.encode_to(v); - } - fn encode_signer_payload_value_to(&self, v: &mut Vec) { - // This extension is never encoded to the signer payload, and extensions - // prior to this are ignored when creating said payload, so clear anything - // we've seen so far. - v.clear(); - } - fn encode_implicit_to(&self, v: &mut Vec) { - // We only use the "implicit" data for extensions _after_ this one - // in the pipeline to form the signer payload. Thus, clear anything - // we've seen so far. 
- v.clear(); - } - - fn inject_signature(&mut self, account: &dyn Any, signature: &dyn Any) { - // Downcast refs back to concrete types (we use `&dyn Any`` so that the trait remains object safe) - let account = account - .downcast_ref::() - .expect("A T::AccountId should have been provided") - .clone(); - let signature = signature - .downcast_ref::() - .expect("A T::Signature should have been provided") - .clone(); - - // The signature is not set through params, only here, once given by a user: - self.0 = VerifySignatureDetails::Signed { signature, account } - } -} - -impl TransactionExtension for VerifySignature { - type Decoded = Static>; - fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { - identifier == "VerifySignature" - } -} - -/// This allows a signature to be provided to the [`VerifySignature`] transaction extension. -// Dev note: this must encode identically to https://github.com/paritytech/polkadot-sdk/blob/fd72d58313c297a10600037ce1bb88ec958d722e/substrate/frame/verify-signature/src/extension.rs#L43 -#[derive(codec::Encode, codec::Decode)] -pub enum VerifySignatureDetails { - /// A signature has been provided. - Signed { - /// The signature. - signature: T::Signature, - /// The account that generated the signature. - account: T::AccountId, - }, - /// No signature was provided. - Disabled, -} - -/// The [`CheckMetadataHash`] transaction extension. -pub struct CheckMetadataHash { - // Eventually we might provide or calculate the metadata hash here, - // but for now we never provide a hash and so this is empty. -} - -impl ExtrinsicParams for CheckMetadataHash { - type Params = (); - - fn new(_client: &ClientState, _params: Self::Params) -> Result { - Ok(CheckMetadataHash {}) - } -} - -impl ExtrinsicParamsEncoder for CheckMetadataHash { - fn encode_value_to(&self, v: &mut Vec) { - // A single 0 byte in the TX payload indicates that the chain should - // _not_ expect any metadata hash to exist in the signer payload. 
- 0u8.encode_to(v); - } - fn encode_implicit_to(&self, v: &mut Vec) { - // We provide no metadata hash in the signer payload to align with the above. - None::<()>.encode_to(v); - } -} - -impl TransactionExtension for CheckMetadataHash { - type Decoded = CheckMetadataHashMode; - fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { - identifier == "CheckMetadataHash" - } -} - -/// Is metadata checking enabled or disabled? -// Dev note: The "Disabled" and "Enabled" variant names match those that the -// transaction extension will be encoded with, in order that DecodeAsType will work -// properly. -#[derive(Copy, Clone, Debug, DecodeAsType)] -pub enum CheckMetadataHashMode { - /// No hash was provided in the signer payload. - Disabled, - /// A hash was provided in the signer payload. - Enabled, -} - -impl CheckMetadataHashMode { - /// Is metadata checking enabled or disabled for this transaction? - pub fn is_enabled(&self) -> bool { - match self { - CheckMetadataHashMode::Disabled => false, - CheckMetadataHashMode::Enabled => true, - } - } -} - -/// The [`CheckSpecVersion`] transaction extension. -pub struct CheckSpecVersion(u32); - -impl ExtrinsicParams for CheckSpecVersion { - type Params = (); - - fn new(client: &ClientState, _params: Self::Params) -> Result { - Ok(CheckSpecVersion(client.runtime_version.spec_version)) - } -} - -impl ExtrinsicParamsEncoder for CheckSpecVersion { - fn encode_implicit_to(&self, v: &mut Vec) { - self.0.encode_to(v); - } -} - -impl TransactionExtension for CheckSpecVersion { - type Decoded = (); - fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { - identifier == "CheckSpecVersion" - } -} - -/// The [`CheckNonce`] transaction extension. 
-pub struct CheckNonce(u64); - -impl ExtrinsicParams for CheckNonce { - type Params = CheckNonceParams; - - fn new(_client: &ClientState, params: Self::Params) -> Result { - Ok(CheckNonce(params.0.unwrap_or(0))) - } -} - -impl ExtrinsicParamsEncoder for CheckNonce { - fn encode_value_to(&self, v: &mut Vec) { - Compact(self.0).encode_to(v); - } -} - -impl TransactionExtension for CheckNonce { - type Decoded = u64; - fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { - identifier == "CheckNonce" - } -} - -/// Configure the nonce used. -#[derive(Debug, Clone, Default)] -pub struct CheckNonceParams(Option); - -impl CheckNonceParams { - /// Retrieve the nonce from the chain and use that. - pub fn from_chain() -> Self { - Self(None) - } - /// Manually set an account nonce to use. - pub fn with_nonce(nonce: u64) -> Self { - Self(Some(nonce)) - } -} - -impl Params for CheckNonceParams { - fn inject_account_nonce(&mut self, nonce: u64) { - if self.0.is_none() { - self.0 = Some(nonce) - } - } -} - -/// The [`CheckTxVersion`] transaction extension. -pub struct CheckTxVersion(u32); - -impl ExtrinsicParams for CheckTxVersion { - type Params = (); - - fn new(client: &ClientState, _params: Self::Params) -> Result { - Ok(CheckTxVersion(client.runtime_version.transaction_version)) - } -} - -impl ExtrinsicParamsEncoder for CheckTxVersion { - fn encode_implicit_to(&self, v: &mut Vec) { - self.0.encode_to(v); - } -} - -impl TransactionExtension for CheckTxVersion { - type Decoded = (); - fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { - identifier == "CheckTxVersion" - } -} - -/// The [`CheckGenesis`] transaction extension. 
-pub struct CheckGenesis(HashFor); - -impl ExtrinsicParams for CheckGenesis { - type Params = (); - - fn new(client: &ClientState, _params: Self::Params) -> Result { - Ok(CheckGenesis(client.genesis_hash)) - } -} - -impl ExtrinsicParamsEncoder for CheckGenesis { - fn encode_implicit_to(&self, v: &mut Vec) { - self.0.encode_to(v); - } -} - -impl TransactionExtension for CheckGenesis { - type Decoded = (); - fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { - identifier == "CheckGenesis" - } -} - -/// The [`CheckMortality`] transaction extension. -pub struct CheckMortality { - params: CheckMortalityParamsInner, - genesis_hash: HashFor, -} - -impl ExtrinsicParams for CheckMortality { - type Params = CheckMortalityParams; - - fn new(client: &ClientState, params: Self::Params) -> Result { - // If a user has explicitly configured the transaction to be mortal for n blocks, but we get - // to this stage and no injected information was able to turn this into MortalFromBlock{..}, - // then we hit an error as we are unable to construct a mortal transaction here. - if matches!(¶ms.0, CheckMortalityParamsInner::MortalForBlocks(_)) { - return Err(ExtrinsicParamsError::custom( - "CheckMortality: We cannot construct an offline extrinsic with only the number of blocks it is mortal for. Use mortal_from_unchecked instead.", - )); - } - - Ok(CheckMortality { - // if nothing has been explicitly configured, we will have a mortal transaction - // valid for 32 blocks if block info is available. - params: params.0, - genesis_hash: client.genesis_hash, - }) - } -} - -impl ExtrinsicParamsEncoder for CheckMortality { - fn encode_value_to(&self, v: &mut Vec) { - match &self.params { - CheckMortalityParamsInner::MortalFromBlock { - for_n_blocks, - from_block_n, - .. 
- } => { - Era::mortal(*for_n_blocks, *from_block_n).encode_to(v); - } - _ => { - // Note: if we see `CheckMortalityInner::MortalForBlocks`, then it means the user has - // configured a block to be mortal for N blocks, but the current block was never injected, - // so we don't know where to start from and default back to building an immortal tx. - Era::Immortal.encode_to(v); - } - } - } - fn encode_implicit_to(&self, v: &mut Vec) { - match &self.params { - CheckMortalityParamsInner::MortalFromBlock { - from_block_hash, .. - } => { - from_block_hash.encode_to(v); - } - _ => { - self.genesis_hash.encode_to(v); - } - } - } -} - -impl TransactionExtension for CheckMortality { - type Decoded = Era; - fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { - identifier == "CheckMortality" - } -} - -/// Parameters to configure the [`CheckMortality`] transaction extension. -pub struct CheckMortalityParams(CheckMortalityParamsInner); - -enum CheckMortalityParamsInner { - /// The transaction will be immortal. - Immortal, - /// The transaction is mortal for N blocks. This must be "upgraded" into - /// [`CheckMortalityParamsInner::MortalFromBlock`] to ultimately work. - MortalForBlocks(u64), - /// The transaction is mortal for N blocks, but if it cannot be "upgraded", - /// then it will be set to immortal instead. This is the default if unset. - MortalForBlocksOrImmortalIfNotPossible(u64), - /// The transaction is mortal and all of the relevant information is provided. - MortalFromBlock { - for_n_blocks: u64, - from_block_n: u64, - from_block_hash: HashFor, - }, -} - -impl Default for CheckMortalityParams { - fn default() -> Self { - // default to being mortal for 32 blocks if possible, else immortal: - CheckMortalityParams(CheckMortalityParamsInner::MortalForBlocksOrImmortalIfNotPossible(32)) - } -} - -impl CheckMortalityParams { - /// Configure a transaction that will be mortal for the number of blocks given. 
- pub fn mortal(for_n_blocks: u64) -> Self { - Self(CheckMortalityParamsInner::MortalForBlocks(for_n_blocks)) - } - - /// Configure a transaction that will be mortal for the number of blocks given, - /// and from the block details provided. Prefer to use [`CheckMortalityParams::mortal()`] - /// where possible, which prevents the block number and hash from being misaligned. - pub fn mortal_from_unchecked( - for_n_blocks: u64, - from_block_n: u64, - from_block_hash: HashFor, - ) -> Self { - Self(CheckMortalityParamsInner::MortalFromBlock { - for_n_blocks, - from_block_n, - from_block_hash, - }) - } - /// An immortal transaction. - pub fn immortal() -> Self { - Self(CheckMortalityParamsInner::Immortal) - } -} - -impl Params for CheckMortalityParams { - fn inject_block(&mut self, from_block_n: u64, from_block_hash: HashFor) { - match &self.0 { - CheckMortalityParamsInner::MortalForBlocks(n) - | CheckMortalityParamsInner::MortalForBlocksOrImmortalIfNotPossible(n) => { - self.0 = CheckMortalityParamsInner::MortalFromBlock { - for_n_blocks: *n, - from_block_n, - from_block_hash, - } - } - _ => { - // Don't change anything if explicit Immortal or explicit block set. - } - } - } -} - -/// The [`ChargeAssetTxPayment`] transaction extension. -#[derive(DecodeAsType)] -#[derive_where(Clone, Debug; T::AssetId)] -#[decode_as_type(trait_bounds = "T::AssetId: DecodeAsType")] -pub struct ChargeAssetTxPayment { - tip: Compact, - asset_id: Option, -} - -impl ChargeAssetTxPayment { - /// Tip to the extrinsic author in the native chain token. - pub fn tip(&self) -> u128 { - self.tip.0 - } - - /// Tip to the extrinsic author using the asset ID given. 
- pub fn asset_id(&self) -> Option<&T::AssetId> { - self.asset_id.as_ref() - } -} - -impl ExtrinsicParams for ChargeAssetTxPayment { - type Params = ChargeAssetTxPaymentParams; - - fn new(_client: &ClientState, params: Self::Params) -> Result { - Ok(ChargeAssetTxPayment { - tip: Compact(params.tip), - asset_id: params.asset_id, - }) - } -} - -impl ExtrinsicParamsEncoder for ChargeAssetTxPayment { - fn encode_value_to(&self, v: &mut Vec) { - (self.tip, &self.asset_id).encode_to(v); - } -} - -impl TransactionExtension for ChargeAssetTxPayment { - type Decoded = Self; - fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { - identifier == "ChargeAssetTxPayment" - } -} - -/// Parameters to configure the [`ChargeAssetTxPayment`] transaction extension. -pub struct ChargeAssetTxPaymentParams { - tip: u128, - asset_id: Option, -} - -impl Default for ChargeAssetTxPaymentParams { - fn default() -> Self { - ChargeAssetTxPaymentParams { - tip: Default::default(), - asset_id: Default::default(), - } - } -} - -impl ChargeAssetTxPaymentParams { - /// Don't provide a tip to the extrinsic author. - pub fn no_tip() -> Self { - ChargeAssetTxPaymentParams { - tip: 0, - asset_id: None, - } - } - /// Tip the extrinsic author in the native chain token. - pub fn tip(tip: u128) -> Self { - ChargeAssetTxPaymentParams { - tip, - asset_id: None, - } - } - /// Tip the extrinsic author using the asset ID given. - pub fn tip_of(tip: u128, asset_id: T::AssetId) -> Self { - ChargeAssetTxPaymentParams { - tip, - asset_id: Some(asset_id), - } - } -} - -impl Params for ChargeAssetTxPaymentParams {} - -/// The [`ChargeTransactionPayment`] transaction extension. -#[derive(Clone, Debug, DecodeAsType)] -pub struct ChargeTransactionPayment { - tip: Compact, -} - -impl ChargeTransactionPayment { - /// Tip to the extrinsic author in the native chain token. 
- pub fn tip(&self) -> u128 { - self.tip.0 - } -} - -impl ExtrinsicParams for ChargeTransactionPayment { - type Params = ChargeTransactionPaymentParams; - - fn new(_client: &ClientState, params: Self::Params) -> Result { - Ok(ChargeTransactionPayment { - tip: Compact(params.tip), - }) - } -} - -impl ExtrinsicParamsEncoder for ChargeTransactionPayment { - fn encode_value_to(&self, v: &mut Vec) { - self.tip.encode_to(v); - } -} - -impl TransactionExtension for ChargeTransactionPayment { - type Decoded = Self; - fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { - identifier == "ChargeTransactionPayment" - } -} - -/// Parameters to configure the [`ChargeTransactionPayment`] transaction extension. -#[derive(Default)] -pub struct ChargeTransactionPaymentParams { - tip: u128, -} - -impl ChargeTransactionPaymentParams { - /// Don't provide a tip to the extrinsic author. - pub fn no_tip() -> Self { - ChargeTransactionPaymentParams { tip: 0 } - } - /// Tip the extrinsic author in the native chain token. - pub fn tip(tip: u128) -> Self { - ChargeTransactionPaymentParams { tip } - } -} - -impl Params for ChargeTransactionPaymentParams {} - -/// This accepts a tuple of [`TransactionExtension`]s, and will dynamically make use of whichever -/// ones are actually required for the chain in the correct order, ignoring the rest. This -/// is a sensible default, and allows for a single configuration to work across multiple chains. -pub struct AnyOf { - params: Vec>, - _marker: core::marker::PhantomData<(T, Params)>, -} - -macro_rules! impl_tuples { - ($($ident:ident $index:tt),+) => { - // We do some magic when the tuple is wrapped in AnyOf. We - // look at the metadata, and use this to select and make use of only the extensions - // that we actually need for the chain we're dealing with. 
- impl ExtrinsicParams for AnyOf - where - T: Config, - $($ident: TransactionExtension,)+ - { - type Params = ($($ident::Params,)+); - - fn new( - client: &ClientState, - params: Self::Params, - ) -> Result { - let metadata = &client.metadata; - let types = metadata.types(); - - // For each transaction extension in the tuple, find the matching index in the metadata, if - // there is one, and add it to a map with that index as the key. - let mut exts_by_index = HashMap::new(); - $({ - for (idx, e) in metadata.extrinsic().transaction_extensions_to_use_for_encoding().enumerate() { - // Skip over any exts that have a match already: - if exts_by_index.contains_key(&idx) { - continue - } - // Break and record as soon as we find a match: - if $ident::matches(e.identifier(), e.extra_ty(), types) { - let ext = $ident::new(client, params.$index)?; - let boxed_ext: Box = Box::new(ext); - exts_by_index.insert(idx, boxed_ext); - break - } - } - })+ - - // Next, turn these into an ordered vec, erroring if we haven't matched on any exts yet. 
- let mut params = Vec::new(); - for (idx, e) in metadata.extrinsic().transaction_extensions_to_use_for_encoding().enumerate() { - let Some(ext) = exts_by_index.remove(&idx) else { - if is_type_empty(e.extra_ty(), types) { - continue - } else { - return Err(ExtrinsicParamsError::UnknownTransactionExtension(e.identifier().to_owned())); - } - }; - params.push(ext); - } - - Ok(AnyOf { - params, - _marker: core::marker::PhantomData - }) - } - } - - impl ExtrinsicParamsEncoder for AnyOf - where - T: Config, - $($ident: TransactionExtension,)+ - { - fn encode_value_to(&self, v: &mut Vec) { - for ext in &self.params { - ext.encode_value_to(v); - } - } - fn encode_signer_payload_value_to(&self, v: &mut Vec) { - for ext in &self.params { - ext.encode_signer_payload_value_to(v); - } - } - fn encode_implicit_to(&self, v: &mut Vec) { - for ext in &self.params { - ext.encode_implicit_to(v); - } - } - fn inject_signature(&mut self, account_id: &dyn Any, signature: &dyn Any) { - for ext in &mut self.params { - ext.inject_signature(account_id, signature); - } - } - } - } -} - -#[rustfmt::skip] -const _: () = { - impl_tuples!(A 0); - impl_tuples!(A 0, B 1); - impl_tuples!(A 0, B 1, C 2); - impl_tuples!(A 0, B 1, C 2, D 3); - impl_tuples!(A 0, B 1, C 2, D 3, E 4); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14); - impl_tuples!(A 0, B 1, C 2, D 
3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, U 19); - impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, U 19, V 20); -}; - -/// Checks to see whether the type being given is empty, ie would require -/// 0 bytes to encode. -fn is_type_empty(type_id: u32, types: &scale_info::PortableRegistry) -> bool { - let Some(ty) = types.resolve(type_id) else { - // Can't resolve; type may not be empty. Not expected to hit this. - return false; - }; - - use scale_info::TypeDef; - match &ty.type_def { - TypeDef::Composite(c) => c.fields.iter().all(|f| is_type_empty(f.ty.id, types)), - TypeDef::Array(a) => a.len == 0 || is_type_empty(a.type_param.id, types), - TypeDef::Tuple(t) => t.fields.iter().all(|f| is_type_empty(f.id, types)), - // Explicitly list these in case any additions are made in the future. - TypeDef::BitSequence(_) - | TypeDef::Variant(_) - | TypeDef::Sequence(_) - | TypeDef::Compact(_) - | TypeDef::Primitive(_) => false, - } -} diff --git a/core/src/constants/address.rs b/core/src/constants/address.rs deleted file mode 100644 index 42f82dd504..0000000000 --- a/core/src/constants/address.rs +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! Construct addresses to access constants with. 
- -use alloc::borrow::Cow; -use alloc::string::String; -use derive_where::derive_where; -use scale_decode::DecodeAsType; - -/// This represents a constant address. Anything implementing this trait -/// can be used to fetch constants. -pub trait Address { - /// The target type of the value that lives at this address. - type Target: DecodeAsType; - - /// The name of the pallet that the constant lives under. - fn pallet_name(&self) -> &str; - - /// The name of the constant in a given pallet. - fn constant_name(&self) -> &str; - - /// An optional hash which, if present, will be checked against - /// the node metadata to confirm that the return type matches what - /// we are expecting. - fn validation_hash(&self) -> Option<[u8; 32]> { - None - } -} - -// Any reference to an address is a valid address. -impl Address for &'_ A { - type Target = A::Target; - - fn pallet_name(&self) -> &str { - A::pallet_name(*self) - } - - fn constant_name(&self) -> &str { - A::constant_name(*self) - } - - fn validation_hash(&self) -> Option<[u8; 32]> { - A::validation_hash(*self) - } -} - -// (str, str) and similar are valid addresses. -impl, B: AsRef> Address for (A, B) { - type Target = scale_value::Value; - - fn pallet_name(&self) -> &str { - self.0.as_ref() - } - - fn constant_name(&self) -> &str { - self.1.as_ref() - } - - fn validation_hash(&self) -> Option<[u8; 32]> { - None - } -} - -/// This represents the address of a constant. -#[derive_where(Clone, Debug, PartialOrd, Ord, PartialEq, Eq)] -pub struct StaticAddress { - pallet_name: Cow<'static, str>, - constant_name: Cow<'static, str>, - constant_hash: Option<[u8; 32]>, - _marker: core::marker::PhantomData, -} - -/// A dynamic lookup address to access a constant. -pub type DynamicAddress = StaticAddress; - -impl StaticAddress { - /// Create a new [`StaticAddress`] to use to look up a constant. 
- pub fn new(pallet_name: impl Into, constant_name: impl Into) -> Self { - Self { - pallet_name: Cow::Owned(pallet_name.into()), - constant_name: Cow::Owned(constant_name.into()), - constant_hash: None, - _marker: core::marker::PhantomData, - } - } - - /// Create a new [`StaticAddress`] that will be validated - /// against node metadata using the hash given. - #[doc(hidden)] - pub fn new_static( - pallet_name: &'static str, - constant_name: &'static str, - hash: [u8; 32], - ) -> Self { - Self { - pallet_name: Cow::Borrowed(pallet_name), - constant_name: Cow::Borrowed(constant_name), - constant_hash: Some(hash), - _marker: core::marker::PhantomData, - } - } - - /// Do not validate this constant prior to accessing it. - pub fn unvalidated(self) -> Self { - Self { - pallet_name: self.pallet_name, - constant_name: self.constant_name, - constant_hash: None, - _marker: self._marker, - } - } -} - -impl Address for StaticAddress { - type Target = ReturnTy; - - fn pallet_name(&self) -> &str { - &self.pallet_name - } - - fn constant_name(&self) -> &str { - &self.constant_name - } - - fn validation_hash(&self) -> Option<[u8; 32]> { - self.constant_hash - } -} - -/// Construct a new dynamic constant lookup. -pub fn dynamic( - pallet_name: impl Into, - constant_name: impl Into, -) -> DynamicAddress { - DynamicAddress::new(pallet_name, constant_name) -} diff --git a/core/src/constants/mod.rs b/core/src/constants/mod.rs deleted file mode 100644 index 19ee7b8f10..0000000000 --- a/core/src/constants/mod.rs +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! Access constants from metadata. -//! -//! Use [`get`] to retrieve a constant from some metadata, or [`validate`] to check that a static -//! constant address lines up with the value seen in the metadata. -//! -//! # Example -//! -//! ```rust -//! use subxt_macro::subxt; -//! 
use subxt_core::constants; -//! use subxt_core::Metadata; -//! -//! // If we generate types without `subxt`, we need to point to `::subxt_core`: -//! #[subxt( -//! crate = "::subxt_core", -//! runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale", -//! )] -//! pub mod polkadot {} -//! -//! // Some metadata we'd like to access constants in: -//! let metadata_bytes = include_bytes!("../../../artifacts/polkadot_metadata_small.scale"); -//! let metadata = Metadata::decode_from(&metadata_bytes[..]).unwrap(); -//! -//! // We can use a static address to obtain some constant: -//! let address = polkadot::constants().balances().existential_deposit(); -//! -//! // This validates that the address given is in line with the metadata -//! // we're trying to access the constant in: -//! constants::validate(&address, &metadata).expect("is valid"); -//! -//! // This acquires the constant (and internally also validates it): -//! let ed = constants::get(&address, &metadata).expect("can decode constant"); -//! -//! assert_eq!(ed, 33_333_333); -//! ``` - -pub mod address; - -use crate::Metadata; -use crate::error::ConstantError; -use address::Address; -use alloc::borrow::ToOwned; -use alloc::string::ToString; -use alloc::vec::Vec; -use frame_decode::constants::ConstantTypeInfo; -use scale_decode::IntoVisitor; - -/// When the provided `address` is statically generated via the `#[subxt]` macro, this validates -/// that the shape of the constant value is the same as the shape expected by the static address. -/// -/// When the provided `address` is dynamic (and thus does not come with any expectation of the -/// shape of the constant value), this just returns `Ok(())` -pub fn validate(address: Addr, metadata: &Metadata) -> Result<(), ConstantError> { - if let Some(actual_hash) = address.validation_hash() { - let expected_hash = metadata - .pallet_by_name(address.pallet_name()) - .ok_or_else(|| ConstantError::PalletNameNotFound(address.pallet_name().to_string()))? 
- .constant_hash(address.constant_name()) - .ok_or_else(|| ConstantError::ConstantNameNotFound { - pallet_name: address.pallet_name().to_string(), - constant_name: address.constant_name().to_owned(), - })?; - if actual_hash != expected_hash { - return Err(ConstantError::IncompatibleCodegen); - } - } - Ok(()) -} - -/// Fetch a constant out of the metadata given a constant address. If the `address` has been -/// statically generated, this will validate that the constant shape is as expected, too. -pub fn get( - address: Addr, - metadata: &Metadata, -) -> Result { - // 1. Validate constant shape if hash given: - validate(&address, metadata)?; - - // 2. Attempt to decode the constant into the type given: - let constant = frame_decode::constants::decode_constant( - address.pallet_name(), - address.constant_name(), - metadata, - metadata.types(), - Addr::Target::into_visitor(), - ) - .map_err(ConstantError::CouldNotDecodeConstant)?; - - Ok(constant) -} - -/// Access the bytes of a constant by the address it is registered under. -pub fn get_bytes( - address: Addr, - metadata: &Metadata, -) -> Result, ConstantError> { - // 1. Validate custom value shape if hash given: - validate(&address, metadata)?; - - // 2. Return the underlying bytes: - let constant = metadata - .constant_info(address.pallet_name(), address.constant_name()) - .map_err(|e| ConstantError::ConstantInfoError(e.into_owned()))?; - Ok(constant.bytes.to_vec()) -} diff --git a/core/src/custom_values/address.rs b/core/src/custom_values/address.rs deleted file mode 100644 index 1046b5dc49..0000000000 --- a/core/src/custom_values/address.rs +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! Construct addresses to access custom values with. 
- -use alloc::borrow::Cow; -use alloc::string::String; -use derive_where::derive_where; -use scale_decode::DecodeAsType; - -/// Use this with [`Address::IsDecodable`]. -pub use crate::utils::{Maybe, No, NoMaybe}; - -/// This represents the address of a custom value in the metadata. -/// Anything that implements it can be used to fetch custom values from the metadata. -/// The trait is implemented by [`str`] for dynamic lookup and [`StaticAddress`] for static queries. -pub trait Address { - /// The type of the custom value. - type Target: DecodeAsType; - /// Should be set to `Yes` for Dynamic values and static values that have a valid type. - /// Should be `No` for custom values, that have an invalid type id. - type IsDecodable: NoMaybe; - - /// the name (key) by which the custom value can be accessed in the metadata. - fn name(&self) -> &str; - - /// An optional hash which, if present, can be checked against node metadata. - fn validation_hash(&self) -> Option<[u8; 32]> { - None - } -} - -// Any reference to an address is a valid address -impl Address for &'_ A { - type Target = A::Target; - type IsDecodable = A::IsDecodable; - - fn name(&self) -> &str { - A::name(*self) - } - - fn validation_hash(&self) -> Option<[u8; 32]> { - A::validation_hash(*self) - } -} - -// Support plain strings for looking up custom values. -impl Address for str { - type Target = scale_value::Value; - type IsDecodable = Maybe; - - fn name(&self) -> &str { - self - } -} - -/// A static address to a custom value. -#[derive_where(Clone, Debug, PartialOrd, Ord, PartialEq, Eq)] -pub struct StaticAddress { - name: Cow<'static, str>, - hash: Option<[u8; 32]>, - marker: core::marker::PhantomData<(ReturnTy, IsDecodable)>, -} - -/// A dynamic address to a custom value. -pub type DynamicAddress = StaticAddress; - -impl StaticAddress { - #[doc(hidden)] - /// Creates a new StaticAddress. 
- pub fn new_static(name: &'static str, hash: [u8; 32]) -> Self { - Self { - name: Cow::Borrowed(name), - hash: Some(hash), - marker: core::marker::PhantomData, - } - } - - /// Create a new [`StaticAddress`] - pub fn new(name: impl Into) -> Self { - Self { - name: name.into().into(), - hash: None, - marker: core::marker::PhantomData, - } - } - - /// Do not validate this custom value prior to accessing it. - pub fn unvalidated(self) -> Self { - Self { - name: self.name, - hash: None, - marker: self.marker, - } - } -} - -impl Address for StaticAddress { - type Target = Target; - type IsDecodable = IsDecodable; - - fn name(&self) -> &str { - &self.name - } - - fn validation_hash(&self) -> Option<[u8; 32]> { - self.hash - } -} - -/// Construct a new dynamic custom value lookup. -pub fn dynamic( - custom_value_name: impl Into, -) -> DynamicAddress { - DynamicAddress::new(custom_value_name) -} diff --git a/core/src/custom_values/mod.rs b/core/src/custom_values/mod.rs deleted file mode 100644 index 5130ae64fa..0000000000 --- a/core/src/custom_values/mod.rs +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! Access custom values from metadata. -//! -//! Use [`get`] to retrieve a custom value from some metadata, or [`validate`] to check that a -//! static custom value address lines up with the value seen in the metadata. -//! -//! # Example -//! -//! ```rust -//! use subxt_macro::subxt; -//! use subxt_core::custom_values; -//! use subxt_core::Metadata; -//! -//! // If we generate types without `subxt`, we need to point to `::subxt_core`: -//! #[subxt( -//! crate = "::subxt_core", -//! runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale", -//! )] -//! pub mod polkadot {} -//! -//! // Some metadata we'd like to access custom values in: -//! 
let metadata_bytes = include_bytes!("../../../artifacts/polkadot_metadata_small.scale"); -//! let metadata = Metadata::decode_from(&metadata_bytes[..]).unwrap(); -//! -//! // At the moment, we don't expect to see any custom values in the metadata -//! // for Polkadot, so this will return an error: -//! let err = custom_values::get("Foo", &metadata); -//! ``` - -pub mod address; - -use crate::utils::Maybe; -use crate::{Metadata, error::CustomValueError}; -use address::Address; -use alloc::vec::Vec; -use frame_decode::custom_values::CustomValueTypeInfo; -use scale_decode::IntoVisitor; - -/// Run the validation logic against some custom value address you'd like to access. Returns `Ok(())` -/// if the address is valid (or if it's not possible to check since the address has no validation hash). -/// Returns an error if the address was not valid (wrong name, type or raw bytes) -pub fn validate(address: Addr, metadata: &Metadata) -> Result<(), CustomValueError> { - if let Some(actual_hash) = address.validation_hash() { - let custom = metadata.custom(); - let custom_value = custom - .get(address.name()) - .ok_or_else(|| CustomValueError::NotFound(address.name().into()))?; - let expected_hash = custom_value.hash(); - if actual_hash != expected_hash { - return Err(CustomValueError::IncompatibleCodegen); - } - } - Ok(()) -} - -/// Access a custom value by the address it is registered under. This can be just a [str] to get back a dynamic value, -/// or a static address from the generated static interface to get a value of a static type returned. -pub fn get>( - address: Addr, - metadata: &Metadata, -) -> Result { - // 1. Validate custom value shape if hash given: - validate(&address, metadata)?; - - // 2. 
Attempt to decode custom value: - let value = frame_decode::custom_values::decode_custom_value( - address.name(), - metadata, - metadata.types(), - Addr::Target::into_visitor(), - ) - .map_err(CustomValueError::CouldNotDecodeCustomValue)?; - - Ok(value) -} - -/// Access the bytes of a custom value by the address it is registered under. -pub fn get_bytes( - address: Addr, - metadata: &Metadata, -) -> Result, CustomValueError> { - // 1. Validate custom value shape if hash given: - validate(&address, metadata)?; - - // 2. Return the underlying bytes: - let custom_value = metadata - .custom_value_info(address.name()) - .map_err(|e| CustomValueError::NotFound(e.not_found))?; - Ok(custom_value.bytes.to_vec()) -} - -#[cfg(test)] -mod tests { - use super::*; - - use alloc::collections::BTreeMap; - use codec::Encode; - use scale_decode::DecodeAsType; - use scale_info::TypeInfo; - use scale_info::form::PortableForm; - - use alloc::borrow::ToOwned; - use alloc::string::String; - use alloc::vec; - - use crate::custom_values; - - #[derive(Debug, Clone, PartialEq, Eq, Encode, TypeInfo, DecodeAsType)] - pub struct Person { - age: u16, - name: String, - } - - fn mock_metadata() -> Metadata { - let person_ty = scale_info::MetaType::new::(); - let unit = scale_info::MetaType::new::<()>(); - let mut types = scale_info::Registry::new(); - let person_ty_id = types.register_type(&person_ty); - let unit_id = types.register_type(&unit); - let types: scale_info::PortableRegistry = types.into(); - - let person = Person { - age: 42, - name: "Neo".into(), - }; - - let person_value_metadata: frame_metadata::v15::CustomValueMetadata = - frame_metadata::v15::CustomValueMetadata { - ty: person_ty_id, - value: person.encode(), - }; - - let frame_metadata = frame_metadata::v15::RuntimeMetadataV15 { - types, - pallets: vec![], - extrinsic: frame_metadata::v15::ExtrinsicMetadata { - version: 0, - address_ty: unit_id, - call_ty: unit_id, - signature_ty: unit_id, - extra_ty: unit_id, - 
signed_extensions: vec![], - }, - ty: unit_id, - apis: vec![], - outer_enums: frame_metadata::v15::OuterEnums { - call_enum_ty: unit_id, - event_enum_ty: unit_id, - error_enum_ty: unit_id, - }, - custom: frame_metadata::v15::CustomMetadata { - map: BTreeMap::from_iter([("Mr. Robot".to_owned(), person_value_metadata)]), - }, - }; - - let metadata: subxt_metadata::Metadata = frame_metadata.try_into().unwrap(); - metadata - } - - #[test] - fn test_decoding() { - let metadata = mock_metadata(); - - assert!(custom_values::get("Invalid Address", &metadata).is_err()); - - let person_addr = custom_values::address::dynamic::("Mr. Robot"); - let person = custom_values::get(&person_addr, &metadata).unwrap(); - assert_eq!( - person, - Person { - age: 42, - name: "Neo".into() - } - ) - } -} diff --git a/core/src/error.rs b/core/src/error.rs deleted file mode 100644 index a5b259f176..0000000000 --- a/core/src/error.rs +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! The errors that can be emitted in this crate. - -use alloc::boxed::Box; -use alloc::string::String; -use alloc::vec::Vec; -use thiserror::Error as DeriveError; - -/// The error emitted when something goes wrong. 
-#[derive(Debug, DeriveError)] -#[allow(missing_docs)] -pub enum Error { - #[error(transparent)] - StorageError(#[from] StorageError), - #[error(transparent)] - Extrinsic(#[from] ExtrinsicError), - #[error(transparent)] - Constant(#[from] ConstantError), - #[error(transparent)] - CustomValue(#[from] CustomValueError), - #[error(transparent)] - RuntimeApi(#[from] RuntimeApiError), - #[error(transparent)] - ViewFunction(#[from] ViewFunctionError), - #[error(transparent)] - Events(#[from] EventsError), -} - -#[derive(Debug, DeriveError)] -#[non_exhaustive] -#[allow(missing_docs)] -pub enum EventsError { - #[error("Can't decode event: can't decode phase: {0}")] - CannotDecodePhase(codec::Error), - #[error("Can't decode event: can't decode pallet index: {0}")] - CannotDecodePalletIndex(codec::Error), - #[error("Can't decode event: can't decode variant index: {0}")] - CannotDecodeVariantIndex(codec::Error), - #[error("Can't decode event: can't find pallet with index {0}")] - CannotFindPalletWithIndex(u8), - #[error( - "Can't decode event: can't find variant with index {variant_index} in pallet {pallet_name}" - )] - CannotFindVariantWithIndex { - pallet_name: String, - variant_index: u8, - }, - #[error("Can't decode field {field_name:?} in event {pallet_name}.{event_name}: {reason}")] - CannotDecodeFieldInEvent { - pallet_name: String, - event_name: String, - field_name: String, - reason: scale_decode::visitor::DecodeError, - }, - #[error("Can't decode event topics: {0}")] - CannotDecodeEventTopics(codec::Error), - #[error("Can't decode the fields of event {pallet_name}.{event_name}: {reason}")] - CannotDecodeEventFields { - pallet_name: String, - event_name: String, - reason: scale_decode::Error, - }, - #[error("Can't decode event {pallet_name}.{event_name} to Event enum: {reason}")] - CannotDecodeEventEnum { - pallet_name: String, - event_name: String, - reason: scale_decode::Error, - }, -} - -#[derive(Debug, DeriveError)] -#[non_exhaustive] -#[allow(missing_docs)] -pub 
enum ViewFunctionError { - #[error("The static View Function address used is not compatible with the live chain")] - IncompatibleCodegen, - #[error("Can't find View Function: pallet {0} not found")] - PalletNotFound(String), - #[error("Can't find View Function {function_name} in pallet {pallet_name}")] - ViewFunctionNotFound { - pallet_name: String, - function_name: String, - }, - #[error("Failed to encode View Function inputs: {0}")] - CouldNotEncodeInputs(frame_decode::view_functions::ViewFunctionInputsEncodeError), - #[error("Failed to decode View Function: {0}")] - CouldNotDecodeResponse(frame_decode::view_functions::ViewFunctionDecodeError), -} - -#[derive(Debug, DeriveError)] -#[non_exhaustive] -#[allow(missing_docs)] -pub enum RuntimeApiError { - #[error("The static Runtime API address used is not compatible with the live chain")] - IncompatibleCodegen, - #[error("Runtime API trait not found: {0}")] - TraitNotFound(String), - #[error("Runtime API method {method_name} not found in trait {trait_name}")] - MethodNotFound { - trait_name: String, - method_name: String, - }, - #[error("Failed to encode Runtime API inputs: {0}")] - CouldNotEncodeInputs(frame_decode::runtime_apis::RuntimeApiInputsEncodeError), - #[error("Failed to decode Runtime API: {0}")] - CouldNotDecodeResponse(frame_decode::runtime_apis::RuntimeApiDecodeError), -} - -#[derive(Debug, DeriveError)] -#[non_exhaustive] -#[allow(missing_docs)] -pub enum CustomValueError { - #[error("The static custom value address used is not compatible with the live chain")] - IncompatibleCodegen, - #[error("The custom value '{0}' was not found")] - NotFound(String), - #[error("Failed to decode custom value: {0}")] - CouldNotDecodeCustomValue(frame_decode::custom_values::CustomValueDecodeError), -} - -/// Something went wrong working with a constant. 
-#[derive(Debug, DeriveError)] -#[non_exhaustive] -#[allow(missing_docs)] -pub enum ConstantError { - #[error("The static constant address used is not compatible with the live chain")] - IncompatibleCodegen, - #[error("Can't find constant: pallet with name {0} not found")] - PalletNameNotFound(String), - #[error( - "Constant '{constant_name}' not found in pallet {pallet_name} in the live chain metadata" - )] - ConstantNameNotFound { - pallet_name: String, - constant_name: String, - }, - #[error("Failed to decode constant: {0}")] - CouldNotDecodeConstant(frame_decode::constants::ConstantDecodeError), - #[error("Cannot obtain constant information from metadata: {0}")] - ConstantInfoError(frame_decode::constants::ConstantInfoError<'static>), -} - -/// Something went wrong trying to encode or decode a storage address. -#[derive(Debug, DeriveError)] -#[non_exhaustive] -#[allow(missing_docs)] -pub enum StorageError { - #[error("The static storage address used is not compatible with the live chain")] - IncompatibleCodegen, - #[error("Can't find storage value: pallet with name {0} not found")] - PalletNameNotFound(String), - #[error( - "Storage entry '{entry_name}' not found in pallet {pallet_name} in the live chain metadata" - )] - StorageEntryNotFound { - pallet_name: String, - entry_name: String, - }, - #[error("Cannot obtain storage information from metadata: {0}")] - StorageInfoError(frame_decode::storage::StorageInfoError<'static>), - #[error("Cannot encode storage key: {0}")] - StorageKeyEncodeError(frame_decode::storage::StorageKeyEncodeError), - #[error("Cannot create a key to iterate over a plain entry")] - CannotIterPlainEntry { - pallet_name: String, - entry_name: String, - }, - #[error( - "Wrong number of key parts provided to iterate a storage address. 
We expected at most {max_expected} key parts but got {got} key parts" - )] - WrongNumberOfKeyPartsProvidedForIterating { max_expected: usize, got: usize }, - #[error( - "Wrong number of key parts provided to fetch a storage address. We expected {expected} key parts but got {got} key parts" - )] - WrongNumberOfKeyPartsProvidedForFetching { expected: usize, got: usize }, -} - -#[derive(Debug, DeriveError)] -#[non_exhaustive] -#[allow(missing_docs)] -pub enum StorageKeyError { - #[error("Can't decode the storage key: {error}")] - StorageKeyDecodeError { - bytes: Vec, - error: frame_decode::storage::StorageKeyDecodeError, - }, - #[error("Can't decode the values from the storage key: {0}")] - CannotDecodeValuesInKey(frame_decode::storage::StorageKeyValueDecodeError), - #[error( - "Cannot decode storage key: there were leftover bytes, indicating that the decoding failed" - )] - LeftoverBytes { bytes: Vec }, - #[error("Can't decode a single value from the storage key part at index {index}: {error}")] - CannotDecodeValueInKey { - index: usize, - error: scale_decode::Error, - }, -} - -#[derive(Debug, DeriveError)] -#[non_exhaustive] -#[allow(missing_docs)] -pub enum StorageValueError { - #[error("Cannot decode storage value: {0}")] - CannotDecode(frame_decode::storage::StorageValueDecodeError), - #[error( - "Cannot decode storage value: there were leftover bytes, indicating that the decoding failed" - )] - LeftoverBytes { bytes: Vec }, -} - -/// An error that can be encountered when constructing a transaction. 
-#[derive(Debug, DeriveError)] -#[allow(missing_docs)] -pub enum ExtrinsicError { - #[error("The extrinsic payload is not compatible with the live chain")] - IncompatibleCodegen, - #[error("Can't find extrinsic: pallet with name {0} not found")] - PalletNameNotFound(String), - #[error("Can't find extrinsic: call name {call_name} doesn't exist in pallet {pallet_name}")] - CallNameNotFound { - pallet_name: String, - call_name: String, - }, - #[error("Can't encode the extrinsic call data: {0}")] - CannotEncodeCallData(scale_encode::Error), - #[error("Subxt does not support the extrinsic versions expected by the chain")] - UnsupportedVersion, - #[error("Cannot construct the required transaction extensions: {0}")] - Params(#[from] ExtrinsicParamsError), - #[error("Cannot decode transaction extension '{name}': {error}")] - CouldNotDecodeTransactionExtension { - /// The extension name. - name: String, - /// The decode error. - error: scale_decode::Error, - }, - #[error( - "After decoding the extrinsic at index {extrinsic_index}, {num_leftover_bytes} bytes were left, suggesting that decoding may have failed" - )] - LeftoverBytes { - /// Index of the extrinsic that failed to decode. - extrinsic_index: usize, - /// Number of bytes leftover after decoding the extrinsic. - num_leftover_bytes: usize, - }, - #[error("{0}")] - ExtrinsicDecodeErrorAt(#[from] ExtrinsicDecodeErrorAt), - #[error("Failed to decode the fields of an extrinsic at index {extrinsic_index}: {error}")] - CannotDecodeFields { - /// Index of the extrinsic whose fields we could not decode - extrinsic_index: usize, - /// The decode error. - error: scale_decode::Error, - }, - #[error("Failed to decode the extrinsic at index {extrinsic_index} to a root enum: {error}")] - CannotDecodeIntoRootExtrinsic { - /// Index of the extrinsic that we failed to decode - extrinsic_index: usize, - /// The decode error. 
- error: scale_decode::Error, - }, -} - -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -#[allow(missing_docs)] -#[error("Cannot decode extrinsic at index {extrinsic_index}: {error}")] -pub struct ExtrinsicDecodeErrorAt { - pub extrinsic_index: usize, - pub error: ExtrinsicDecodeErrorAtReason, -} - -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -#[allow(missing_docs)] -pub enum ExtrinsicDecodeErrorAtReason { - #[error("{0}")] - DecodeError(frame_decode::extrinsics::ExtrinsicDecodeError), - #[error("Leftover bytes")] - LeftoverBytes(Vec), -} - -/// An error that can be emitted when trying to construct an instance of [`crate::config::ExtrinsicParams`], -/// encode data from the instance, or match on signed extensions. -#[derive(Debug, DeriveError)] -#[non_exhaustive] -#[allow(missing_docs)] -pub enum ExtrinsicParamsError { - #[error("Cannot find type id '{type_id} in the metadata (context: {context})")] - MissingTypeId { - /// Type ID. - type_id: u32, - /// Some arbitrary context to help narrow the source of the error. - context: &'static str, - }, - #[error("The chain expects a signed extension with the name {0}, but we did not provide one")] - UnknownTransactionExtension(String), - #[error("Error constructing extrinsic parameters: {0}")] - Custom(Box), -} - -impl ExtrinsicParamsError { - /// Create a custom [`ExtrinsicParamsError`] from a string. - pub fn custom>(error: S) -> Self { - let error: String = error.into(); - let error: Box = Box::from(error); - ExtrinsicParamsError::Custom(error) - } -} - -impl From for ExtrinsicParamsError { - fn from(value: core::convert::Infallible) -> Self { - match value {} - } -} diff --git a/core/src/events.rs b/core/src/events.rs deleted file mode 100644 index 9d38517910..0000000000 --- a/core/src/events.rs +++ /dev/null @@ -1,1022 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! 
Decode and work with events. -//! -//! # Example -//! -//! ```rust -//! use subxt_macro::subxt; -//! use subxt_core::config::PolkadotConfig; -//! use subxt_core::events; -//! use subxt_core::Metadata; -//! use subxt_core::dynamic::Value; -//! -//! // If we generate types without `subxt`, we need to point to `::subxt_core`: -//! #[subxt( -//! crate = "::subxt_core", -//! runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale", -//! )] -//! pub mod polkadot {} -//! -//! // Some metadata we'll use to work with storage entries: -//! let metadata_bytes = include_bytes!("../../artifacts/polkadot_metadata_full.scale"); -//! let metadata = Metadata::decode_from(&metadata_bytes[..]).unwrap(); -//! -//! // Some bytes representing events (located in System.Events storage): -//! let event_bytes = hex::decode("1c00000000000000a2e9b53d5517020000000100000000000310c96d901d0102000000020000000408d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27dbeea5a030000000000000000000000000000020000000402d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d8eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48102700000000000000000000000000000000020000000407be5ddb1579b72e84524fc29e78609e3caf42e85aa118ebfe0b0ad404b5bdd25fbeea5a030000000000000000000000000000020000002100d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27dbeea5a03000000000000000000000000000000000000000000000000000000000000020000000000426df03e00000000").unwrap(); -//! -//! // We can decode these bytes like so: -//! let evs = events::decode_from::(event_bytes, metadata); -//! -//! // And then do things like iterate over them and inspect details: -//! for ev in evs.iter() { -//! let ev = ev.unwrap(); -//! println!("Index: {}", ev.index()); -//! println!("Name: {}.{}", ev.pallet_name(), ev.variant_name()); -//! println!("Fields: {:?}", ev.decode_as_fields::().unwrap()); -//! } -//! 
``` - -use alloc::string::ToString; -use alloc::sync::Arc; -use alloc::vec::Vec; -use codec::{Compact, Decode, Encode}; -use derive_where::derive_where; -use scale_decode::{DecodeAsFields, DecodeAsType}; -use subxt_metadata::PalletMetadata; - -use crate::{ - Metadata, - config::{Config, HashFor}, - error::EventsError, -}; - -/// Create a new [`Events`] instance from the given bytes. -/// -/// This is a shortcut for [`Events::decode_from`]. -pub fn decode_from(event_bytes: Vec, metadata: Metadata) -> Events { - Events::decode_from(event_bytes, metadata) -} - -/// Trait to uniquely identify the events's identity from the runtime metadata. -/// -/// Generated API structures that represent an event implement this trait. -/// -/// The trait is utilized to decode emitted events from a block, via obtaining the -/// form of the `Event` from the metadata. -pub trait StaticEvent: DecodeAsFields { - /// Pallet name. - const PALLET: &'static str; - /// Event name. - const EVENT: &'static str; - - /// Returns true if the given pallet and event names match this event. - fn is_event(pallet: &str, event: &str) -> bool { - Self::PALLET == pallet && Self::EVENT == event - } -} - -/// A collection of events obtained from a block, bundled with the necessary -/// information needed to decode and iterate over them. -#[derive_where(Clone)] -pub struct Events { - metadata: Metadata, - // Note; raw event bytes are prefixed with a Compact containing - // the number of events to be decoded. The start_idx reflects that, so - // that we can skip over those bytes when decoding them - event_bytes: Arc<[u8]>, - start_idx: usize, - num_events: u32, - marker: core::marker::PhantomData, -} - -// Ignore the Metadata when debug-logging events; it's big and distracting. 
-impl core::fmt::Debug for Events { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("Events") - .field("event_bytes", &self.event_bytes) - .field("start_idx", &self.start_idx) - .field("num_events", &self.num_events) - .finish() - } -} - -impl Events { - /// Create a new [`Events`] instance from the given bytes. - pub fn decode_from(event_bytes: Vec, metadata: Metadata) -> Self { - // event_bytes is a SCALE encoded vector of events. So, pluck the - // compact encoded length from the front, leaving the remaining bytes - // for our iterating to decode. - // - // Note: if we get no bytes back, avoid an error reading vec length - // and default to 0 events. - let cursor = &mut &*event_bytes; - let num_events = >::decode(cursor).unwrap_or(Compact(0)).0; - - // Start decoding after the compact encoded bytes. - let start_idx = event_bytes.len() - cursor.len(); - - Self { - metadata, - event_bytes: event_bytes.into(), - start_idx, - num_events, - marker: core::marker::PhantomData, - } - } - - /// The number of events. - pub fn len(&self) -> u32 { - self.num_events - } - - /// Are there no events in this block? - // Note: mainly here to satisfy clippy. - pub fn is_empty(&self) -> bool { - self.num_events == 0 - } - - /// Return the bytes representing all of the events. - pub fn bytes(&self) -> &[u8] { - &self.event_bytes - } - - /// Iterate over all of the events, using metadata to dynamically - /// decode them as we go, and returning the raw bytes and other associated - /// details. If an error occurs, all subsequent iterations return `None`. - // Dev note: The returned iterator is 'static + Send so that we can box it up and make - // use of it with our `FilterEvents` stuff. 
- pub fn iter( - &self, - ) -> impl Iterator, EventsError>> + Send + Sync + 'static { - // The event bytes ignoring the compact encoded length on the front: - let event_bytes = self.event_bytes.clone(); - let metadata = self.metadata.clone(); - let num_events = self.num_events; - - let mut pos = self.start_idx; - let mut index = 0; - core::iter::from_fn(move || { - if event_bytes.len() <= pos || num_events == index { - None - } else { - match EventDetails::decode_from(metadata.clone(), event_bytes.clone(), pos, index) { - Ok(event_details) => { - // Skip over decoded bytes in next iteration: - pos += event_details.bytes().len(); - // Increment the index: - index += 1; - // Return the event details: - Some(Ok(event_details)) - } - Err(e) => { - // By setting the position to the "end" of the event bytes, - // the cursor len will become 0 and the iterator will return `None` - // from now on: - pos = event_bytes.len(); - Some(Err(e)) - } - } - } - }) - } - - /// Iterate through the events using metadata to dynamically decode and skip - /// them, and return only those which should decode to the provided `Ev` type. - /// If an error occurs, all subsequent iterations return `None`. - pub fn find(&self) -> impl Iterator> { - self.iter() - .filter_map(|ev| ev.and_then(|ev| ev.as_event::()).transpose()) - } - - /// Iterate through the events using metadata to dynamically decode and skip - /// them, and return the first event found which decodes to the provided `Ev` type. - pub fn find_first(&self) -> Result, EventsError> { - self.find::().next().transpose() - } - - /// Iterate through the events using metadata to dynamically decode and skip - /// them, and return the last event found which decodes to the provided `Ev` type. - pub fn find_last(&self) -> Result, EventsError> { - self.find::().last().transpose() - } - - /// Find an event that decodes to the type provided. Returns true if it was found. 
- pub fn has(&self) -> Result { - Ok(self.find::().next().transpose()?.is_some()) - } -} - -/// A phase of a block's execution. -#[derive(Copy, Clone, Debug, Eq, PartialEq, Decode, Encode)] -pub enum Phase { - /// Applying an extrinsic. - ApplyExtrinsic(u32), - /// Finalizing the block. - Finalization, - /// Initializing the block. - Initialization, -} - -/// The event details. -#[derive(Debug, Clone)] -pub struct EventDetails { - phase: Phase, - /// The index of the event in the list of events in a given block. - index: u32, - all_bytes: Arc<[u8]>, - // start of the bytes (phase, pallet/variant index and then fields and then topic to follow). - start_idx: usize, - // start of the event (ie pallet/variant index and then the fields and topic after). - event_start_idx: usize, - // start of the fields (ie after phase and pallet/variant index). - event_fields_start_idx: usize, - // end of the fields. - event_fields_end_idx: usize, - // end of everything (fields + topics) - end_idx: usize, - metadata: Metadata, - topics: Vec>, -} - -impl EventDetails { - /// Attempt to dynamically decode a single event from our events input. 
- fn decode_from( - metadata: Metadata, - all_bytes: Arc<[u8]>, - start_idx: usize, - index: u32, - ) -> Result, EventsError> { - let input = &mut &all_bytes[start_idx..]; - - let phase = Phase::decode(input).map_err(EventsError::CannotDecodePhase)?; - - let event_start_idx = all_bytes.len() - input.len(); - - let pallet_index = u8::decode(input).map_err(EventsError::CannotDecodePalletIndex)?; - let variant_index = u8::decode(input).map_err(EventsError::CannotDecodeVariantIndex)?; - - let event_fields_start_idx = all_bytes.len() - input.len(); - - // Get metadata for the event: - let event_pallet = metadata - .pallet_by_event_index(pallet_index) - .ok_or_else(|| EventsError::CannotFindPalletWithIndex(pallet_index))?; - let event_variant = event_pallet - .event_variant_by_index(variant_index) - .ok_or_else(|| EventsError::CannotFindVariantWithIndex { - pallet_name: event_pallet.name().to_string(), - variant_index, - })?; - - tracing::debug!( - "Decoding Event '{}::{}'", - event_pallet.name(), - &event_variant.name - ); - - // Skip over the bytes belonging to this event. - for field_metadata in &event_variant.fields { - // Skip over the bytes for this field: - scale_decode::visitor::decode_with_visitor( - input, - field_metadata.ty.id, - metadata.types(), - scale_decode::visitor::IgnoreVisitor::new(), - ) - .map_err(|e| EventsError::CannotDecodeFieldInEvent { - pallet_name: event_pallet.name().to_string(), - event_name: event_variant.name.clone(), - field_name: field_metadata - .name - .clone() - .unwrap_or("".to_string()), - reason: e, - })?; - } - - // the end of the field bytes. - let event_fields_end_idx = all_bytes.len() - input.len(); - - // topics come after the event data in EventRecord. - let topics = - Vec::>::decode(input).map_err(EventsError::CannotDecodeEventTopics)?; - - // what bytes did we skip over in total, including topics. 
- let end_idx = all_bytes.len() - input.len(); - - Ok(EventDetails { - phase, - index, - start_idx, - event_start_idx, - event_fields_start_idx, - event_fields_end_idx, - end_idx, - all_bytes, - metadata, - topics, - }) - } - - /// When was the event produced? - pub fn phase(&self) -> Phase { - self.phase - } - - /// What index is this event in the stored events for this block. - pub fn index(&self) -> u32 { - self.index - } - - /// The index of the pallet that the event originated from. - pub fn pallet_index(&self) -> u8 { - // Note: never panics; we expect these bytes to exist - // in order that the EventDetails could be created. - self.all_bytes[self.event_fields_start_idx - 2] - } - - /// The index of the event variant that the event originated from. - pub fn variant_index(&self) -> u8 { - // Note: never panics; we expect these bytes to exist - // in order that the EventDetails could be created. - self.all_bytes[self.event_fields_start_idx - 1] - } - - /// The name of the pallet from whence the Event originated. - pub fn pallet_name(&self) -> &str { - self.event_metadata().pallet.name() - } - - /// The name of the event (ie the name of the variant that it corresponds to). - pub fn variant_name(&self) -> &str { - &self.event_metadata().variant.name - } - - /// Fetch details from the metadata for this event. - pub fn event_metadata(&self) -> EventMetadataDetails<'_> { - let pallet = self - .metadata - .pallet_by_event_index(self.pallet_index()) - .expect("event pallet to be found; we did this already during decoding"); - let variant = pallet - .event_variant_by_index(self.variant_index()) - .expect("event variant to be found; we did this already during decoding"); - - EventMetadataDetails { pallet, variant } - } - - /// Return _all_ of the bytes representing this event, which include, in order: - /// - The phase. - /// - Pallet and event index. - /// - Event fields. - /// - Event Topics. 
- pub fn bytes(&self) -> &[u8] { - &self.all_bytes[self.start_idx..self.end_idx] - } - - /// Return the bytes representing the fields stored in this event. - pub fn field_bytes(&self) -> &[u8] { - &self.all_bytes[self.event_fields_start_idx..self.event_fields_end_idx] - } - - /// Decode and provide the event fields back in the form of a [`scale_value::Composite`] - /// type which represents the named or unnamed fields that were present in the event. - pub fn decode_as_fields(&self) -> Result { - let bytes = &mut self.field_bytes(); - let event_metadata = self.event_metadata(); - - let mut fields = event_metadata - .variant - .fields - .iter() - .map(|f| scale_decode::Field::new(f.ty.id, f.name.as_deref())); - - let decoded = - E::decode_as_fields(bytes, &mut fields, self.metadata.types()).map_err(|e| { - EventsError::CannotDecodeEventFields { - pallet_name: event_metadata.pallet.name().to_string(), - event_name: event_metadata.variant.name.clone(), - reason: e, - } - })?; - - Ok(decoded) - } - - /// Attempt to decode these [`EventDetails`] into a type representing the event fields. - /// Such types are exposed in the codegen as `pallet_name::events::EventName` types. - pub fn as_event(&self) -> Result, EventsError> { - let ev_metadata = self.event_metadata(); - if ev_metadata.pallet.name() == E::PALLET && ev_metadata.variant.name == E::EVENT { - let mut fields = ev_metadata - .variant - .fields - .iter() - .map(|f| scale_decode::Field::new(f.ty.id, f.name.as_deref())); - let decoded = - E::decode_as_fields(&mut self.field_bytes(), &mut fields, self.metadata.types()) - .map_err(|e| EventsError::CannotDecodeEventFields { - pallet_name: E::PALLET.to_string(), - event_name: E::EVENT.to_string(), - reason: e, - })?; - Ok(Some(decoded)) - } else { - Ok(None) - } - } - - /// Attempt to decode these [`EventDetails`] into a root event type (which includes - /// the pallet and event enum variants as well as the event fields). 
A compatible - /// type for this is exposed via static codegen as a root level `Event` type. - pub fn as_root_event(&self) -> Result { - let bytes = &self.all_bytes[self.event_start_idx..self.event_fields_end_idx]; - - let decoded = E::decode_as_type( - &mut &bytes[..], - self.metadata.outer_enums().event_enum_ty(), - self.metadata.types(), - ) - .map_err(|e| { - let md = self.event_metadata(); - EventsError::CannotDecodeEventEnum { - pallet_name: md.pallet.name().to_string(), - event_name: md.variant.name.clone(), - reason: e, - } - })?; - - Ok(decoded) - } - - /// Return the topics associated with this event. - pub fn topics(&self) -> &[HashFor] { - &self.topics - } -} - -/// Details for the given event plucked from the metadata. -pub struct EventMetadataDetails<'a> { - /// Metadata for the pallet that the event belongs to. - pub pallet: PalletMetadata<'a>, - /// Metadata for the variant which describes the pallet events. - pub variant: &'a scale_info::Variant, -} - -/// Event related test utilities used outside this module. -#[cfg(test)] -pub(crate) mod test_utils { - use super::*; - use crate::config::{HashFor, SubstrateConfig}; - use codec::Encode; - use frame_metadata::{ - RuntimeMetadataPrefixed, - v15::{ - CustomMetadata, ExtrinsicMetadata, OuterEnums, PalletEventMetadata, PalletMetadata, - RuntimeMetadataV15, - }, - }; - use scale_info::{TypeInfo, meta_type}; - - /// An "outer" events enum containing exactly one event. - #[derive( - Encode, - Decode, - TypeInfo, - Clone, - Debug, - PartialEq, - Eq, - scale_encode::EncodeAsType, - scale_decode::DecodeAsType, - )] - pub enum AllEvents { - Test(Ev), - } - - /// This encodes to the same format an event is expected to encode to - /// in node System.Events storage. - #[derive(Encode)] - pub struct EventRecord { - phase: Phase, - event: AllEvents, - topics: Vec>, - } - - impl EventRecord { - /// Create a new event record with the given phase, event, and topics. 
- pub fn new(phase: Phase, event: E, topics: Vec>) -> Self { - Self { - phase, - event: AllEvents::Test(event), - topics, - } - } - } - - /// Build an EventRecord, which encoded events in the format expected - /// to be handed back from storage queries to System.Events. - pub fn event_record(phase: Phase, event: E) -> EventRecord { - EventRecord::new(phase, event, vec![]) - } - - /// Build fake metadata consisting of a single pallet that knows - /// about the event type provided. - pub fn metadata() -> Metadata { - // Extrinsic needs to contain at least the generic type parameter "Call" - // for the metadata to be valid. - // The "Call" type from the metadata is used to decode extrinsics. - // In reality, the extrinsic type has "Call", "Address", "Extra", "Signature" generic types. - #[allow(unused)] - #[derive(TypeInfo)] - struct ExtrinsicType { - call: Call, - } - // Because this type is used to decode extrinsics, we expect this to be a TypeDefVariant. - // Each pallet must contain one single variant. - #[allow(unused)] - #[derive(TypeInfo)] - enum RuntimeCall { - PalletName(Pallet), - } - // The calls of the pallet. 
- #[allow(unused)] - #[derive(TypeInfo)] - enum Pallet { - #[allow(unused)] - SomeCall, - } - - let pallets = vec![PalletMetadata { - name: "Test", - storage: None, - calls: None, - event: Some(PalletEventMetadata { - ty: meta_type::(), - }), - constants: vec![], - error: None, - index: 0, - docs: vec![], - }]; - - let extrinsic = ExtrinsicMetadata { - version: 0, - signed_extensions: vec![], - address_ty: meta_type::<()>(), - call_ty: meta_type::(), - signature_ty: meta_type::<()>(), - extra_ty: meta_type::<()>(), - }; - - let meta = RuntimeMetadataV15::new( - pallets, - extrinsic, - meta_type::<()>(), - vec![], - OuterEnums { - call_enum_ty: meta_type::<()>(), - event_enum_ty: meta_type::>(), - error_enum_ty: meta_type::<()>(), - }, - CustomMetadata { - map: Default::default(), - }, - ); - let runtime_metadata: RuntimeMetadataPrefixed = meta.into(); - let metadata: subxt_metadata::Metadata = runtime_metadata.try_into().unwrap(); - - metadata - } - - /// Build an `Events` object for test purposes, based on the details provided, - /// and with a default block hash. - pub fn events( - metadata: Metadata, - event_records: Vec>, - ) -> Events { - let num_events = event_records.len() as u32; - let mut event_bytes = Vec::new(); - for ev in event_records { - ev.encode_to(&mut event_bytes); - } - events_raw(metadata, event_bytes, num_events) - } - - /// Much like [`events`], but takes pre-encoded events and event count, so that we can - /// mess with the bytes in tests if we need to. 
- pub fn events_raw( - metadata: Metadata, - event_bytes: Vec, - num_events: u32, - ) -> Events { - // Prepend compact encoded length to event bytes: - let mut all_event_bytes = Compact(num_events).encode(); - all_event_bytes.extend(event_bytes); - Events::decode_from(all_event_bytes, metadata) - } -} - -#[cfg(test)] -mod tests { - use super::{ - test_utils::{AllEvents, EventRecord, event_record, events, events_raw}, - *, - }; - use crate::config::SubstrateConfig; - use crate::events::Phase; - use codec::Encode; - use primitive_types::H256; - use scale_info::TypeInfo; - use scale_value::Value; - - /// Build a fake wrapped metadata. - fn metadata() -> Metadata { - test_utils::metadata::() - } - - /// [`RawEventDetails`] can be annoying to test, because it contains - /// type info in the decoded field Values. Strip that here so that - /// we can compare fields more easily. - #[derive(Debug, PartialEq, Eq, Clone)] - pub struct TestRawEventDetails { - pub phase: Phase, - pub index: u32, - pub pallet: String, - pub pallet_index: u8, - pub variant: String, - pub variant_index: u8, - pub fields: Vec, - } - - /// Compare some actual [`RawEventDetails`] with a hand-constructed - /// (probably) [`TestRawEventDetails`]. 
- pub fn assert_raw_events_match( - actual: EventDetails, - expected: TestRawEventDetails, - ) { - let actual_fields_no_context: Vec<_> = actual - .decode_as_fields::>() - .expect("can decode field values (2)") - .into_values() - .map(|value| value.remove_context()) - .collect(); - - // Check each of the other fields: - assert_eq!(actual.phase(), expected.phase); - assert_eq!(actual.index(), expected.index); - assert_eq!(actual.pallet_name(), expected.pallet); - assert_eq!(actual.pallet_index(), expected.pallet_index); - assert_eq!(actual.variant_name(), expected.variant); - assert_eq!(actual.variant_index(), expected.variant_index); - assert_eq!(actual_fields_no_context, expected.fields); - } - - #[test] - fn statically_decode_single_root_event() { - #[derive(Clone, Debug, PartialEq, Decode, Encode, TypeInfo, scale_decode::DecodeAsType)] - enum Event { - A(u8, bool, Vec), - } - - // Create fake metadata that knows about our single event, above: - let metadata = metadata::(); - - // Encode our events in the format we expect back from a node, and - // construct an Events object to iterate them: - let event = Event::A(1, true, vec!["Hi".into()]); - let events = events::( - metadata, - vec![event_record(Phase::ApplyExtrinsic(123), event.clone())], - ); - - let ev = events - .iter() - .next() - .expect("one event expected") - .expect("event should be extracted OK"); - - // This is the line we're testing: - let decoded_event = ev - .as_root_event::>() - .expect("can decode event into root enum again"); - - // It should equal the event we put in: - assert_eq!(decoded_event, AllEvents::Test(event)); - } - - #[test] - fn dynamically_decode_single_event() { - #[derive(Clone, Debug, PartialEq, Decode, Encode, TypeInfo)] - enum Event { - A(u8, bool, Vec), - } - - // Create fake metadata that knows about our single event, above: - let metadata = metadata::(); - - // Encode our events in the format we expect back from a node, and - // construct an Events object to iterate them: 
- let event = Event::A(1, true, vec!["Hi".into()]); - let events = events::( - metadata, - vec![event_record(Phase::ApplyExtrinsic(123), event)], - ); - - let mut event_details = events.iter(); - assert_raw_events_match( - event_details.next().unwrap().unwrap(), - TestRawEventDetails { - phase: Phase::ApplyExtrinsic(123), - index: 0, - pallet: "Test".to_string(), - pallet_index: 0, - variant: "A".to_string(), - variant_index: 0, - fields: vec![ - Value::u128(1), - Value::bool(true), - Value::unnamed_composite(vec![Value::string("Hi")]), - ], - }, - ); - assert!(event_details.next().is_none()); - } - - #[test] - fn dynamically_decode_multiple_events() { - #[derive(Clone, Copy, Debug, PartialEq, Decode, Encode, TypeInfo)] - enum Event { - A(u8), - B(bool), - } - - // Create fake metadata that knows about our single event, above: - let metadata = metadata::(); - - // Encode our events in the format we expect back from a node, and - // construct an Events object to iterate them: - let event1 = Event::A(1); - let event2 = Event::B(true); - let event3 = Event::A(234); - - let events = events::( - metadata, - vec![ - event_record(Phase::Initialization, event1), - event_record(Phase::ApplyExtrinsic(123), event2), - event_record(Phase::Finalization, event3), - ], - ); - - let mut event_details = events.iter(); - - assert_raw_events_match( - event_details.next().unwrap().unwrap(), - TestRawEventDetails { - index: 0, - phase: Phase::Initialization, - pallet: "Test".to_string(), - pallet_index: 0, - variant: "A".to_string(), - variant_index: 0, - fields: vec![Value::u128(1)], - }, - ); - assert_raw_events_match( - event_details.next().unwrap().unwrap(), - TestRawEventDetails { - index: 1, - phase: Phase::ApplyExtrinsic(123), - pallet: "Test".to_string(), - pallet_index: 0, - variant: "B".to_string(), - variant_index: 1, - fields: vec![Value::bool(true)], - }, - ); - assert_raw_events_match( - event_details.next().unwrap().unwrap(), - TestRawEventDetails { - index: 2, - phase: 
Phase::Finalization, - pallet: "Test".to_string(), - pallet_index: 0, - variant: "A".to_string(), - variant_index: 0, - fields: vec![Value::u128(234)], - }, - ); - assert!(event_details.next().is_none()); - } - - #[test] - fn dynamically_decode_multiple_events_until_error() { - #[derive(Clone, Debug, PartialEq, Decode, Encode, TypeInfo)] - enum Event { - A(u8), - B(bool), - } - - // Create fake metadata that knows about our single event, above: - let metadata = metadata::(); - - // Encode 2 events: - let mut event_bytes = vec![]; - event_record(Phase::Initialization, Event::A(1)).encode_to(&mut event_bytes); - event_record(Phase::ApplyExtrinsic(123), Event::B(true)).encode_to(&mut event_bytes); - - // Push a few naff bytes to the end (a broken third event): - event_bytes.extend_from_slice(&[3, 127, 45, 0, 2]); - - // Encode our events in the format we expect back from a node, and - // construct an Events object to iterate them: - let events = events_raw( - metadata, - event_bytes, - 3, // 2 "good" events, and then it'll hit the naff bytes. - ); - - let mut events_iter = events.iter(); - assert_raw_events_match( - events_iter.next().unwrap().unwrap(), - TestRawEventDetails { - index: 0, - phase: Phase::Initialization, - pallet: "Test".to_string(), - pallet_index: 0, - variant: "A".to_string(), - variant_index: 0, - fields: vec![Value::u128(1)], - }, - ); - assert_raw_events_match( - events_iter.next().unwrap().unwrap(), - TestRawEventDetails { - index: 1, - phase: Phase::ApplyExtrinsic(123), - pallet: "Test".to_string(), - pallet_index: 0, - variant: "B".to_string(), - variant_index: 1, - fields: vec![Value::bool(true)], - }, - ); - - // We'll hit an error trying to decode the third event: - assert!(events_iter.next().unwrap().is_err()); - // ... and then "None" from then on. 
- assert!(events_iter.next().is_none()); - assert!(events_iter.next().is_none()); - } - - #[test] - fn compact_event_field() { - #[derive(Clone, Debug, PartialEq, Encode, Decode, TypeInfo)] - enum Event { - A(#[codec(compact)] u32), - } - - // Create fake metadata that knows about our single event, above: - let metadata = metadata::(); - - // Encode our events in the format we expect back from a node, and - // construct an Events object to iterate them: - let events = events::( - metadata, - vec![event_record(Phase::Finalization, Event::A(1))], - ); - - // Dynamically decode: - let mut event_details = events.iter(); - assert_raw_events_match( - event_details.next().unwrap().unwrap(), - TestRawEventDetails { - index: 0, - phase: Phase::Finalization, - pallet: "Test".to_string(), - pallet_index: 0, - variant: "A".to_string(), - variant_index: 0, - fields: vec![Value::u128(1)], - }, - ); - assert!(event_details.next().is_none()); - } - - #[test] - fn compact_wrapper_struct_field() { - #[derive(Clone, Decode, Debug, PartialEq, Encode, TypeInfo)] - enum Event { - A(#[codec(compact)] CompactWrapper), - } - - #[derive(Clone, Decode, Debug, PartialEq, codec::CompactAs, Encode, TypeInfo)] - struct CompactWrapper(u64); - - // Create fake metadata that knows about our single event, above: - let metadata = metadata::(); - - // Encode our events in the format we expect back from a node, and - // construct an Events object to iterate them: - let events = events::( - metadata, - vec![event_record( - Phase::Finalization, - Event::A(CompactWrapper(1)), - )], - ); - - // Dynamically decode: - let mut event_details = events.iter(); - assert_raw_events_match( - event_details.next().unwrap().unwrap(), - TestRawEventDetails { - index: 0, - phase: Phase::Finalization, - pallet: "Test".to_string(), - pallet_index: 0, - variant: "A".to_string(), - variant_index: 0, - fields: vec![Value::unnamed_composite(vec![Value::u128(1)])], - }, - ); - assert!(event_details.next().is_none()); - } - - 
#[test] - fn event_containing_explicit_index() { - #[derive(Clone, Debug, PartialEq, Eq, Decode, Encode, TypeInfo)] - #[repr(u8)] - #[allow(trivial_numeric_casts, clippy::unnecessary_cast)] // required because the Encode derive produces a warning otherwise - pub enum MyType { - B = 10u8, - } - - #[derive(Clone, Debug, PartialEq, Decode, Encode, TypeInfo)] - enum Event { - A(MyType), - } - - // Create fake metadata that knows about our single event, above: - let metadata = metadata::(); - - // Encode our events in the format we expect back from a node, and - // construct an Events object to iterate them: - let events = events::( - metadata, - vec![event_record(Phase::Finalization, Event::A(MyType::B))], - ); - - // Dynamically decode: - let mut event_details = events.iter(); - assert_raw_events_match( - event_details.next().unwrap().unwrap(), - TestRawEventDetails { - index: 0, - phase: Phase::Finalization, - pallet: "Test".to_string(), - pallet_index: 0, - variant: "A".to_string(), - variant_index: 0, - fields: vec![Value::unnamed_variant("B", vec![])], - }, - ); - assert!(event_details.next().is_none()); - } - - #[test] - fn topics() { - #[derive(Clone, Debug, PartialEq, Decode, Encode, TypeInfo, scale_decode::DecodeAsType)] - enum Event { - A(u8, bool, Vec), - } - - // Create fake metadata that knows about our single event, above: - let metadata = metadata::(); - - // Encode our events in the format we expect back from a node, and - // construct an Events object to iterate them: - let event = Event::A(1, true, vec!["Hi".into()]); - let topics = vec![H256::from_low_u64_le(123), H256::from_low_u64_le(456)]; - let events = events::( - metadata, - vec![EventRecord::new( - Phase::ApplyExtrinsic(123), - event, - topics.clone(), - )], - ); - - let ev = events - .iter() - .next() - .expect("one event expected") - .expect("event should be extracted OK"); - - assert_eq!(topics, ev.topics()); - } -} diff --git a/core/src/lib.rs b/core/src/lib.rs deleted file mode 100644 
index 9dcecabf4a..0000000000 --- a/core/src/lib.rs +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! # subxt-core -//! -//! A `#[no_std]` compatible subset of the functionality provided in the `subxt` crate. This -//! contains the core logic for encoding and decoding things, but nothing related to networking. -//! -//! Here's an overview of the main things exposed here: -//! -//! - [`blocks`]: decode and explore block bodies. -//! - [`constants`]: access and validate the constant addresses in some metadata. -//! - [`custom_values`]: access and validate the custom value addresses in some metadata. -//! - [`storage`]: construct storage request payloads and decode the results you'd get back. -//! - [`tx`]: construct and sign transactions (extrinsics). -//! - [`runtime_api`]: construct runtime API request payloads and decode the results you'd get back. -//! - [`events`]: decode and explore events. -//! - -#![deny(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] -pub extern crate alloc; - -pub mod blocks; -pub mod client; -pub mod config; -pub mod constants; -pub mod custom_values; -pub mod dynamic; -pub mod error; -pub mod events; -pub mod runtime_api; -pub mod storage; -pub mod tx; -pub mod utils; -pub mod view_functions; - -pub use config::Config; -pub use error::Error; -pub use subxt_metadata::Metadata; - -/// Re-exports of some of the key external crates. -pub mod ext { - pub use codec; - pub use scale_decode; - pub use scale_encode; - pub use scale_value; -} diff --git a/core/src/runtime_api/mod.rs b/core/src/runtime_api/mod.rs deleted file mode 100644 index 81390c2db4..0000000000 --- a/core/src/runtime_api/mod.rs +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! 
Encode runtime API payloads, decode the associated values returned from them, and validate -//! static runtime API payloads. -//! -//! # Example -//! -//! ```rust -//! use subxt_macro::subxt; -//! use subxt_core::runtime_api; -//! use subxt_core::Metadata; -//! -//! // If we generate types without `subxt`, we need to point to `::subxt_core`: -//! #[subxt( -//! crate = "::subxt_core", -//! runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale", -//! )] -//! pub mod polkadot {} -//! -//! // Some metadata we'll use to work with storage entries: -//! let metadata_bytes = include_bytes!("../../../artifacts/polkadot_metadata_small.scale"); -//! let metadata = Metadata::decode_from(&metadata_bytes[..]).unwrap(); -//! -//! // Build a storage query to access account information. -//! let payload = polkadot::apis().metadata().metadata_versions(); -//! -//! // We can validate that the payload is compatible with the given metadata. -//! runtime_api::validate(&payload, &metadata).unwrap(); -//! -//! // Encode the payload name and arguments to hand to a node: -//! let _call_name = runtime_api::call_name(&payload); -//! let _call_args = runtime_api::call_args(&payload, &metadata).unwrap(); -//! -//! // If we were to obtain a value back from the node, we could -//! // then decode it using the same payload and metadata like so: -//! let value_bytes = hex::decode("080e0000000f000000").unwrap(); -//! let value = runtime_api::decode_value(&mut &*value_bytes, &payload, &metadata).unwrap(); -//! -//! println!("Available metadata versions: {value:?}"); -//! ``` - -pub mod payload; - -use crate::Metadata; -use crate::error::RuntimeApiError; -use alloc::format; -use alloc::string::{String, ToString}; -use alloc::vec::Vec; -use payload::Payload; -use scale_decode::IntoVisitor; - -/// Run the validation logic against some runtime API payload you'd like to use. 
Returns `Ok(())` -/// if the payload is valid (or if it's not possible to check since the payload has no validation hash). -/// Return an error if the payload was not valid or something went wrong trying to validate it (ie -/// the runtime API in question do not exist at all) -pub fn validate(payload: P, metadata: &Metadata) -> Result<(), RuntimeApiError> { - let Some(hash) = payload.validation_hash() else { - return Ok(()); - }; - - let trait_name = payload.trait_name(); - let method_name = payload.method_name(); - - let api_trait = metadata - .runtime_api_trait_by_name(trait_name) - .ok_or_else(|| RuntimeApiError::TraitNotFound(trait_name.to_string()))?; - let api_method = - api_trait - .method_by_name(method_name) - .ok_or_else(|| RuntimeApiError::MethodNotFound { - trait_name: trait_name.to_string(), - method_name: method_name.to_string(), - })?; - - if hash != api_method.hash() { - Err(RuntimeApiError::IncompatibleCodegen) - } else { - Ok(()) - } -} - -/// Return the name of the runtime API call from the payload. -pub fn call_name(payload: P) -> String { - format!("{}_{}", payload.trait_name(), payload.method_name()) -} - -/// Return the encoded call args given a runtime API payload. -pub fn call_args(payload: P, metadata: &Metadata) -> Result, RuntimeApiError> { - let value = frame_decode::runtime_apis::encode_runtime_api_inputs( - payload.trait_name(), - payload.method_name(), - payload.args(), - metadata, - metadata.types(), - ) - .map_err(RuntimeApiError::CouldNotEncodeInputs)?; - - Ok(value) -} - -/// Decode the value bytes at the location given by the provided runtime API payload. 
-pub fn decode_value( - bytes: &mut &[u8], - payload: P, - metadata: &Metadata, -) -> Result { - let value = frame_decode::runtime_apis::decode_runtime_api_response( - payload.trait_name(), - payload.method_name(), - bytes, - metadata, - metadata.types(), - P::ReturnType::into_visitor(), - ) - .map_err(RuntimeApiError::CouldNotDecodeResponse)?; - - Ok(value) -} diff --git a/core/src/runtime_api/payload.rs b/core/src/runtime_api/payload.rs deleted file mode 100644 index 6a64fa5f80..0000000000 --- a/core/src/runtime_api/payload.rs +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! This module contains the trait and types used to represent -//! runtime API calls that can be made. - -use alloc::borrow::Cow; -use alloc::string::String; -use core::marker::PhantomData; -use derive_where::derive_where; -use frame_decode::runtime_apis::IntoEncodableValues; -use scale_decode::DecodeAsType; - -/// This represents a runtime API payload that can be used to call a Runtime API on -/// a chain and decode the response. -pub trait Payload { - /// Type of the arguments. - type ArgsType: IntoEncodableValues; - /// The return type of the function call. - type ReturnType: DecodeAsType; - - /// The runtime API trait name. - fn trait_name(&self) -> &str; - - /// The runtime API method name. - fn method_name(&self) -> &str; - - /// The input arguments. - fn args(&self) -> &Self::ArgsType; - - /// Returns the statically generated validation hash. - fn validation_hash(&self) -> Option<[u8; 32]> { - None - } -} - -// Any reference to a payload is a valid payload. 
-impl Payload for &'_ P { - type ArgsType = P::ArgsType; - type ReturnType = P::ReturnType; - - fn trait_name(&self) -> &str { - P::trait_name(*self) - } - - fn method_name(&self) -> &str { - P::method_name(*self) - } - - fn args(&self) -> &Self::ArgsType { - P::args(*self) - } - - fn validation_hash(&self) -> Option<[u8; 32]> { - P::validation_hash(*self) - } -} - -/// A runtime API payload containing the generic argument data -/// and interpreting the result of the call as `ReturnTy`. -/// -/// This can be created from static values (ie those generated -/// via the `subxt` macro) or dynamic values via [`dynamic`]. -#[derive_where(Clone, Debug, Eq, Ord, PartialEq, PartialOrd; ArgsType)] -pub struct StaticPayload { - trait_name: Cow<'static, str>, - method_name: Cow<'static, str>, - args: ArgsType, - validation_hash: Option<[u8; 32]>, - _marker: PhantomData, -} - -/// A dynamic runtime API payload. -pub type DynamicPayload = StaticPayload; - -impl Payload - for StaticPayload -{ - type ArgsType = ArgsType; - type ReturnType = ReturnType; - - fn trait_name(&self) -> &str { - &self.trait_name - } - - fn method_name(&self) -> &str { - &self.method_name - } - - fn args(&self) -> &Self::ArgsType { - &self.args - } - - fn validation_hash(&self) -> Option<[u8; 32]> { - self.validation_hash - } -} - -impl StaticPayload { - /// Create a new [`StaticPayload`]. - pub fn new( - trait_name: impl Into, - method_name: impl Into, - args: ArgsType, - ) -> Self { - StaticPayload { - trait_name: trait_name.into().into(), - method_name: method_name.into().into(), - args, - validation_hash: None, - _marker: PhantomData, - } - } - - /// Create a new static [`StaticPayload`] using static function name - /// and scale-encoded argument data. - /// - /// This is only expected to be used from codegen. 
- #[doc(hidden)] - pub fn new_static( - trait_name: &'static str, - method_name: &'static str, - args: ArgsType, - hash: [u8; 32], - ) -> StaticPayload { - StaticPayload { - trait_name: Cow::Borrowed(trait_name), - method_name: Cow::Borrowed(method_name), - args, - validation_hash: Some(hash), - _marker: core::marker::PhantomData, - } - } - - /// Do not validate this call prior to submitting it. - pub fn unvalidated(self) -> Self { - Self { - validation_hash: None, - ..self - } - } - - /// Returns the trait name. - pub fn trait_name(&self) -> &str { - &self.trait_name - } - - /// Returns the method name. - pub fn method_name(&self) -> &str { - &self.method_name - } -} - -/// Create a new [`DynamicPayload`]. -pub fn dynamic( - trait_name: impl Into, - method_name: impl Into, - args_data: ArgsType, -) -> DynamicPayload { - DynamicPayload::new(trait_name, method_name, args_data) -} diff --git a/core/src/storage/address.rs b/core/src/storage/address.rs deleted file mode 100644 index fe91d8321f..0000000000 --- a/core/src/storage/address.rs +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! Construct addresses to access storage entries with. - -use crate::utils::{Maybe, YesMaybe}; -use alloc::borrow::Cow; -use alloc::string::String; -use alloc::vec::Vec; -use frame_decode::storage::{IntoDecodableValues, IntoEncodableValues}; -use scale_decode::DecodeAsType; - -/// A storage address. This allows access to a given storage entry, which can then -/// be iterated over or fetched from by providing the relevant set of keys, or -/// otherwise inspected. -pub trait Address { - /// All of the keys required to get to an individual value at this address. - /// Keys must always impl [`IntoEncodableValues`], and for iteration must - /// also impl [`frame_decode::storage::IntoDecodableValues`]. 
- type KeyParts: IntoEncodableValues + IntoDecodableValues; - /// Type of the storage value at this location. - type Value: DecodeAsType; - /// Does the address point to a plain value (as opposed to a map)? - /// Set to [`crate::utils::Yes`] to enable APIs which require a map, - /// or [`crate::utils::Maybe`] to enable APIs which allow a map. - type IsPlain: YesMaybe; - - /// The pallet containing this storage entry. - fn pallet_name(&self) -> &str; - - /// The name of the storage entry. - fn entry_name(&self) -> &str; - - /// Return a unique hash for this address which can be used to validate it against metadata. - fn validation_hash(&self) -> Option<[u8; 32]>; -} - -// Any reference to an address is a valid address. -impl Address for &'_ A { - type KeyParts = A::KeyParts; - type Value = A::Value; - type IsPlain = A::IsPlain; - - fn pallet_name(&self) -> &str { - A::pallet_name(*self) - } - - fn entry_name(&self) -> &str { - A::entry_name(*self) - } - - fn validation_hash(&self) -> Option<[u8; 32]> { - A::validation_hash(*self) - } -} - -/// An address which is generated by the static APIs. -pub struct StaticAddress { - pallet_name: Cow<'static, str>, - entry_name: Cow<'static, str>, - validation_hash: Option<[u8; 32]>, - marker: core::marker::PhantomData<(KeyParts, Value, IsPlain)>, -} - -impl Clone for StaticAddress { - fn clone(&self) -> Self { - Self { - pallet_name: self.pallet_name.clone(), - entry_name: self.entry_name.clone(), - validation_hash: self.validation_hash, - marker: self.marker, - } - } -} - -impl core::fmt::Debug for StaticAddress { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("StaticAddress") - .field("pallet_name", &self.pallet_name) - .field("entry_name", &self.entry_name) - .field("validation_hash", &self.validation_hash) - .finish() - } -} - -impl StaticAddress { - /// Create a new [`StaticAddress`] using static strings for the pallet and call name. 
- /// This is only expected to be used from codegen. - #[doc(hidden)] - pub fn new_static(pallet_name: &'static str, entry_name: &'static str, hash: [u8; 32]) -> Self { - Self { - pallet_name: Cow::Borrowed(pallet_name), - entry_name: Cow::Borrowed(entry_name), - validation_hash: Some(hash), - marker: core::marker::PhantomData, - } - } - - /// Create a new address. - pub fn new(pallet_name: impl Into, entry_name: impl Into) -> Self { - Self { - pallet_name: pallet_name.into().into(), - entry_name: entry_name.into().into(), - validation_hash: None, - marker: core::marker::PhantomData, - } - } - - /// Do not validate this storage entry prior to accessing it. - pub fn unvalidated(mut self) -> Self { - self.validation_hash = None; - self - } -} - -impl Address for StaticAddress -where - KeyParts: IntoEncodableValues + IntoDecodableValues, - Value: DecodeAsType, - IsPlain: YesMaybe, -{ - type KeyParts = KeyParts; - type Value = Value; - type IsPlain = IsPlain; - - fn pallet_name(&self) -> &str { - &self.pallet_name - } - - fn entry_name(&self) -> &str { - &self.entry_name - } - - fn validation_hash(&self) -> Option<[u8; 32]> { - self.validation_hash - } -} - -impl, B: AsRef> Address for (A, B) { - type KeyParts = Vec; - type Value = scale_value::Value; - type IsPlain = Maybe; - - fn pallet_name(&self) -> &str { - self.0.as_ref() - } - - fn entry_name(&self) -> &str { - self.1.as_ref() - } - - fn validation_hash(&self) -> Option<[u8; 32]> { - None - } -} - -/// A dynamic address is simply a [`StaticAddress`] which asserts that the -/// entry *might* be a map and *might* have a default value. -pub type DynamicAddress, Value = scale_value::Value> = - StaticAddress; - -/// Construct a new dynamic storage address. You can define the type of the -/// storage keys and value yourself here, but have no guarantee that they will -/// be correct. 
-pub fn dynamic( - pallet_name: impl Into, - entry_name: impl Into, -) -> DynamicAddress { - DynamicAddress::::new(pallet_name.into(), entry_name.into()) -} diff --git a/core/src/storage/mod.rs b/core/src/storage/mod.rs deleted file mode 100644 index c694d11358..0000000000 --- a/core/src/storage/mod.rs +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! Encode storage keys, decode storage values, and validate static storage addresses. -//! -//! # Example -//! -//! ```rust -//! use subxt_signer::sr25519::dev; -//! use subxt_macro::subxt; -//! use subxt_core::storage; -//! use subxt_core::Metadata; -//! -//! // If we generate types without `subxt`, we need to point to `::subxt_core`: -//! #[subxt( -//! crate = "::subxt_core", -//! runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale", -//! )] -//! pub mod polkadot {} -//! -//! // Some metadata we'll use to work with storage entries: -//! let metadata_bytes = include_bytes!("../../../artifacts/polkadot_metadata_small.scale"); -//! let metadata = Metadata::decode_from(&metadata_bytes[..]).unwrap(); -//! -//! // Build a storage query to access account information. -//! let address = polkadot::storage().system().account(); -//! -//! // We can validate that the address is compatible with the given metadata. -//! storage::validate(&address, &metadata).unwrap(); -//! -//! // We can fetch details about the storage entry associated with this address: -//! let entry = storage::entry(address, &metadata).unwrap(); -//! -//! // .. including generating a key to fetch the entry with: -//! let fetch_key = entry.fetch_key((dev::alice().public_key().into(),)).unwrap(); -//! -//! // .. or generating a key to iterate over entries with at a given depth: -//! let iter_key = entry.iter_key(()).unwrap(); -//! -//! // Given a value, we can decode it: -//! 
let value_bytes = hex::decode("00000000000000000100000000000000000064a7b3b6e00d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080").unwrap(); -//! let value = entry.value(value_bytes).decode().unwrap(); -//! -//! println!("Alice's account info: {value:?}"); -//! ``` - -mod prefix_of; -mod storage_entry; -mod storage_key; -mod storage_key_value; -mod storage_value; - -pub mod address; - -use crate::{Metadata, error::StorageError}; -use address::Address; -use alloc::string::ToString; - -pub use prefix_of::{EqualOrPrefixOf, PrefixOf}; -pub use storage_entry::{StorageEntry, entry}; -pub use storage_key::{StorageHasher, StorageKey, StorageKeyPart}; -pub use storage_key_value::StorageKeyValue; -pub use storage_value::StorageValue; - -/// When the provided `address` is statically generated via the `#[subxt]` macro, this validates -/// that the shape of the storage value is the same as the shape expected by the static address. -/// -/// When the provided `address` is dynamic (and thus does not come with any expectation of the -/// shape of the constant value), this just returns `Ok(())` -pub fn validate(address: Addr, metadata: &Metadata) -> Result<(), StorageError> { - let Some(hash) = address.validation_hash() else { - return Ok(()); - }; - - let pallet_name = address.pallet_name(); - let entry_name = address.entry_name(); - - let pallet_metadata = metadata - .pallet_by_name(pallet_name) - .ok_or_else(|| StorageError::PalletNameNotFound(pallet_name.to_string()))?; - let storage_hash = pallet_metadata.storage_hash(entry_name).ok_or_else(|| { - StorageError::StorageEntryNotFound { - pallet_name: pallet_name.to_string(), - entry_name: entry_name.to_string(), - } - })?; - - if storage_hash != hash { - Err(StorageError::IncompatibleCodegen) - } else { - Ok(()) - } -} diff --git a/core/src/storage/prefix_of.rs b/core/src/storage/prefix_of.rs deleted file mode 100644 index 170eb6f784..0000000000 --- 
a/core/src/storage/prefix_of.rs +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use alloc::vec::Vec; -use frame_decode::helpers::IntoEncodableValues; -use scale_encode::EncodeAsType; - -/// For a given set of values that can be used as keys for a storage entry, -/// this is implemented for any prefixes of that set. ie if the keys `(A,B,C)` -/// would access a storage value, then `PrefixOf<(A,B,C)>` is implemented for -/// `(A,B)`, `(A,)` and `()`. -pub trait PrefixOf: IntoEncodableValues {} - -// If T impls PrefixOf, &T impls PrefixOf. -impl> PrefixOf for &T {} - -// Impls for tuples up to length 6 (storage maps rarely require more than 2 entries -// so it's very unlikely we'll ever need to go this deep). -impl PrefixOf<(A,)> for () {} - -impl PrefixOf<(A, B)> for () {} -impl PrefixOf<(A, B)> for (A,) where (A,): IntoEncodableValues {} - -impl PrefixOf<(A, B, C)> for () {} -impl PrefixOf<(A, B, C)> for (A,) where (A,): IntoEncodableValues {} -impl PrefixOf<(A, B, C)> for (A, B) where (A, B): IntoEncodableValues {} - -impl PrefixOf<(A, B, C, D)> for () {} -impl PrefixOf<(A, B, C, D)> for (A,) where (A,): IntoEncodableValues {} -impl PrefixOf<(A, B, C, D)> for (A, B) where (A, B): IntoEncodableValues {} -impl PrefixOf<(A, B, C, D)> for (A, B, C) where (A, B, C): IntoEncodableValues {} - -impl PrefixOf<(A, B, C, D, E)> for () {} -impl PrefixOf<(A, B, C, D, E)> for (A,) where (A,): IntoEncodableValues {} -impl PrefixOf<(A, B, C, D, E)> for (A, B) where (A, B): IntoEncodableValues {} -impl PrefixOf<(A, B, C, D, E)> for (A, B, C) where (A, B, C): IntoEncodableValues {} -impl PrefixOf<(A, B, C, D, E)> for (A, B, C, D) where - (A, B, C, D): IntoEncodableValues -{ -} - -impl PrefixOf<(A, B, C, D, E, F)> for () {} -impl PrefixOf<(A, B, C, D, E, F)> for (A,) where (A,): IntoEncodableValues {} -impl PrefixOf<(A, B, C, D, E, F)> for (A, B) where 
(A, B): IntoEncodableValues {} -impl PrefixOf<(A, B, C, D, E, F)> for (A, B, C) where - (A, B, C): IntoEncodableValues -{ -} -impl PrefixOf<(A, B, C, D, E, F)> for (A, B, C, D) where - (A, B, C, D): IntoEncodableValues -{ -} -impl PrefixOf<(A, B, C, D, E, F)> for (A, B, C, D, E) where - (A, B, C, D, E): IntoEncodableValues -{ -} - -// Vecs are prefixes of vecs. The length is not statically known and so -// these would be given dynamically only, leaving the correct length to the user. -impl PrefixOf> for Vec {} - -// We don't use arrays in Subxt for storage entry access, but `IntoEncodableValues` -// supports them so let's allow impls which do use them to benefit too. -macro_rules! array_impl { - ($n:literal: $($p:literal)+) => { - $( - impl PrefixOf<[T; $n]> for [T; $p] {} - )+ - } -} - -array_impl!(1: 0); -array_impl!(2: 1 0); -array_impl!(3: 2 1 0); -array_impl!(4: 3 2 1 0); -array_impl!(5: 4 3 2 1 0); -array_impl!(6: 5 4 3 2 1 0); - -/// This is much like [`PrefixOf`] except that it also includes `Self` as an allowed type, -/// where `Self` must impl [`IntoEncodableValues`] just as every [`PrefixOf`] does. -pub trait EqualOrPrefixOf: IntoEncodableValues {} - -// Tuples -macro_rules! tuple_impl_eq { - ($($t:ident)+) => { - // Any T that is a PrefixOf impls EqualOrPrefixOf too - impl <$($t,)+ T: PrefixOf<($($t,)+)>> EqualOrPrefixOf<($($t,)+)> for T {} - // Keys impls EqualOrPrefixOf - impl <$($t),+> EqualOrPrefixOf<($($t,)+)> for ($($t,)+) where ($($t,)+): IntoEncodableValues {} - // &'a Keys impls EqualOrPrefixOf - impl <'a, $($t),+> EqualOrPrefixOf<($($t,)+)> for &'a ($($t,)+) where ($($t,)+): IntoEncodableValues {} - } -} - -tuple_impl_eq!(A); -tuple_impl_eq!(A B); -tuple_impl_eq!(A B C); -tuple_impl_eq!(A B C D); -tuple_impl_eq!(A B C D E); -tuple_impl_eq!(A B C D E F); - -// Vec -impl EqualOrPrefixOf> for Vec {} -impl EqualOrPrefixOf> for &Vec {} - -// Arrays -macro_rules! 
array_impl_eq { - ($($n:literal)+) => { - $( - impl EqualOrPrefixOf<[A; $n]> for [A; $n] {} - impl <'a, A: EncodeAsType> EqualOrPrefixOf<[A; $n]> for &'a [A; $n] {} - )+ - } -} - -impl EqualOrPrefixOf<[A; N]> for T where T: PrefixOf<[A; N]> {} -array_impl_eq!(1 2 3 4 5 6); - -#[cfg(test)] -mod test { - use super::*; - - struct Test(core::marker::PhantomData); - - impl Test { - fn new() -> Self { - Test(core::marker::PhantomData) - } - fn accepts_prefix_of>(&self, keys: P) { - let _encoder = keys.into_encodable_values(); - } - fn accepts_eq_or_prefix_of>(&self, keys: P) { - let _encoder = keys.into_encodable_values(); - } - } - - #[test] - fn test_prefix_of() { - // In real life we'd have a struct a bit like this: - let t = Test::<(bool, String, u64)>::new(); - - // And we'd want to be able to call some method like this: - //// This shouldn't work: - // t.accepts_prefix_of((true, String::from("hi"), 0)); - t.accepts_prefix_of(&(true, String::from("hi"))); - t.accepts_prefix_of((true, String::from("hi"))); - t.accepts_prefix_of((true,)); - t.accepts_prefix_of(()); - - let t = Test::<[u64; 5]>::new(); - - //// This shouldn't work: - // t.accepts_prefix_of([0,1,2,3,4]); - t.accepts_prefix_of([0, 1, 2, 3]); - t.accepts_prefix_of([0, 1, 2, 3]); - t.accepts_prefix_of([0, 1, 2]); - t.accepts_prefix_of([0, 1]); - t.accepts_prefix_of([0]); - t.accepts_prefix_of([]); - } - - #[test] - fn test_eq_or_prefix_of() { - // In real life we'd have a struct a bit like this: - let t = Test::<(bool, String, u64)>::new(); - - // And we'd want to be able to call some method like this: - t.accepts_eq_or_prefix_of(&(true, String::from("hi"), 0)); - t.accepts_eq_or_prefix_of(&(true, String::from("hi"))); - t.accepts_eq_or_prefix_of((true,)); - t.accepts_eq_or_prefix_of(()); - - t.accepts_eq_or_prefix_of((true, String::from("hi"), 0)); - t.accepts_eq_or_prefix_of((true, String::from("hi"))); - t.accepts_eq_or_prefix_of((true,)); - t.accepts_eq_or_prefix_of(()); - - let t = Test::<[u64; 
5]>::new(); - - t.accepts_eq_or_prefix_of([0, 1, 2, 3, 4]); - t.accepts_eq_or_prefix_of([0, 1, 2, 3]); - t.accepts_eq_or_prefix_of([0, 1, 2]); - t.accepts_eq_or_prefix_of([0, 1]); - t.accepts_eq_or_prefix_of([0]); - t.accepts_eq_or_prefix_of([]); - - t.accepts_eq_or_prefix_of([0, 1, 2, 3, 4]); - t.accepts_eq_or_prefix_of([0, 1, 2, 3]); - t.accepts_eq_or_prefix_of([0, 1, 2]); - t.accepts_eq_or_prefix_of([0, 1]); - t.accepts_eq_or_prefix_of([0]); - t.accepts_eq_or_prefix_of([]); - } -} diff --git a/core/src/storage/storage_entry.rs b/core/src/storage/storage_entry.rs deleted file mode 100644 index 0f7efaf6b5..0000000000 --- a/core/src/storage/storage_entry.rs +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use super::{PrefixOf, StorageKeyValue, StorageValue, address::Address}; -use crate::error::StorageError; -use crate::utils::YesMaybe; -use alloc::sync::Arc; -use alloc::vec::Vec; -use frame_decode::storage::{IntoEncodableValues, StorageInfo}; -use scale_info::PortableRegistry; -use subxt_metadata::Metadata; - -/// Create a [`StorageEntry`] to work with a given storage entry. -pub fn entry<'info, Addr: Address>( - address: Addr, - metadata: &'info Metadata, -) -> Result, StorageError> { - super::validate(&address, metadata)?; - - use frame_decode::storage::StorageTypeInfo; - let types = metadata.types(); - let info = metadata - .storage_info(address.pallet_name(), address.entry_name()) - .map_err(|e| StorageError::StorageInfoError(e.into_owned()))?; - - Ok(StorageEntry(Arc::new(StorageEntryInner { - address, - info: Arc::new(info), - types, - }))) -} - -/// This represents a single storage entry (be it a plain value or map). 
-pub struct StorageEntry<'info, Addr>(Arc>); - -impl<'info, Addr> Clone for StorageEntry<'info, Addr> { - fn clone(&self) -> Self { - Self(self.0.clone()) - } -} - -struct StorageEntryInner<'info, Addr> { - address: Addr, - info: Arc>, - types: &'info PortableRegistry, -} - -impl<'info, Addr: Address> StorageEntry<'info, Addr> { - /// Name of the pallet containing this storage entry. - pub fn pallet_name(&self) -> &str { - self.0.address.pallet_name() - } - - /// Name of the storage entry. - pub fn entry_name(&self) -> &str { - self.0.address.entry_name() - } - - /// Is the storage entry a plain value? - pub fn is_plain(&self) -> bool { - self.0.info.keys.is_empty() - } - - /// Is the storage entry a map? - pub fn is_map(&self) -> bool { - !self.is_plain() - } - - /// Instantiate a [`StorageKeyValue`] for this entry. - /// - /// It is expected that the bytes are obtained by iterating key/value pairs at this address. - pub fn key_value( - &self, - key_bytes: impl Into>, - value_bytes: Vec, - ) -> StorageKeyValue<'info, Addr> { - StorageKeyValue::new( - self.0.info.clone(), - self.0.types, - key_bytes.into(), - value_bytes, - ) - } - - /// Instantiate a [`StorageValue`] for this entry. - /// - /// It is expected that the bytes are obtained by fetching a value at this address. - pub fn value(&self, bytes: Vec) -> StorageValue<'info, Addr::Value> { - StorageValue::new(self.0.info.clone(), self.0.types, bytes) - } - - /// Return the default [`StorageValue`] for this storage entry, if there is one. - pub fn default_value(&self) -> Option> { - self.0.info.default_value.as_deref().map(|default_bytes| { - StorageValue::new(self.0.info.clone(), self.0.types, default_bytes.to_vec()) - }) - } - - /// The keys for plain storage values are always 32 byte hashes. 
- pub fn key_prefix(&self) -> [u8; 32] { - frame_decode::storage::encode_storage_key_prefix( - self.0.address.pallet_name(), - self.0.address.entry_name(), - ) - } - - // This has a less "strict" type signature and so is just used under the hood. - fn key(&self, key_parts: Keys) -> Result, StorageError> { - let key = frame_decode::storage::encode_storage_key_with_info( - self.0.address.pallet_name(), - self.0.address.entry_name(), - key_parts, - &self.0.info, - self.0.types, - ) - .map_err(StorageError::StorageKeyEncodeError)?; - - Ok(key) - } - - /// This constructs a key suitable for fetching a value at the given map storage address. This will error - /// if we can see that the wrong number of key parts are provided. - pub fn fetch_key(&self, key_parts: Addr::KeyParts) -> Result, StorageError> { - if key_parts.num_encodable_values() != self.0.info.keys.len() { - Err(StorageError::WrongNumberOfKeyPartsProvidedForFetching { - expected: self.0.info.keys.len(), - got: key_parts.num_encodable_values(), - }) - } else { - self.key(key_parts) - } - } - - /// This constructs a key suitable for iterating at the given storage address. This will error - /// if we can see that too many key parts are provided. 
- pub fn iter_key>( - &self, - key_parts: Keys, - ) -> Result, StorageError> { - if Addr::IsPlain::is_yes() { - Err(StorageError::CannotIterPlainEntry { - pallet_name: self.0.address.pallet_name().into(), - entry_name: self.0.address.entry_name().into(), - }) - } else if key_parts.num_encodable_values() >= self.0.info.keys.len() { - Err(StorageError::WrongNumberOfKeyPartsProvidedForIterating { - max_expected: self.0.info.keys.len() - 1, - got: key_parts.num_encodable_values(), - }) - } else { - self.key(key_parts) - } - } -} diff --git a/core/src/storage/storage_key.rs b/core/src/storage/storage_key.rs deleted file mode 100644 index 1880bef618..0000000000 --- a/core/src/storage/storage_key.rs +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use crate::error::StorageKeyError; -use alloc::sync::Arc; -use core::marker::PhantomData; -use frame_decode::storage::{IntoDecodableValues, StorageInfo, StorageKey as StorageKeyPartInfo}; -use scale_info::PortableRegistry; - -pub use frame_decode::storage::StorageHasher; - -/// This represents the different parts of a storage key. 
-pub struct StorageKey<'info, KeyParts> { - info: Arc>, - types: &'info PortableRegistry, - bytes: Arc<[u8]>, - marker: PhantomData, -} - -impl<'info, KeyParts: IntoDecodableValues> StorageKey<'info, KeyParts> { - pub(crate) fn new( - info: &StorageInfo<'info, u32>, - types: &'info PortableRegistry, - bytes: Arc<[u8]>, - ) -> Result { - let cursor = &mut &*bytes; - let storage_key_info = frame_decode::storage::decode_storage_key_with_info( - cursor, info, types, - ) - .map_err(|e| StorageKeyError::StorageKeyDecodeError { - bytes: bytes.to_vec(), - error: e, - })?; - - if !cursor.is_empty() { - return Err(StorageKeyError::LeftoverBytes { - bytes: cursor.to_vec(), - }); - } - - Ok(StorageKey { - info: Arc::new(storage_key_info), - types, - bytes, - marker: PhantomData, - }) - } - - /// Attempt to decode the values contained within this storage key. The target type is - /// given by the storage address used to access this entry. To decode into a custom type, - /// use [`Self::parts()`] or [`Self::part()`] and decode each part. - pub fn decode(&self) -> Result { - let values = - frame_decode::storage::decode_storage_key_values(&self.bytes, &self.info, self.types) - .map_err(StorageKeyError::CannotDecodeValuesInKey)?; - - Ok(values) - } - - /// Iterate over the parts of this storage key. Each part of a storage key corresponds to a - /// single value that has been hashed. - pub fn parts(&self) -> impl ExactSizeIterator> { - let parts_len = self.info.parts().len(); - (0..parts_len).map(move |index| StorageKeyPart { - index, - info: self.info.clone(), - types: self.types, - bytes: self.bytes.clone(), - }) - } - - /// Return the part of the storage key at the provided index, or `None` if the index is out of bounds. 
- pub fn part(&self, index: usize) -> Option> { - if index < self.parts().len() { - Some(StorageKeyPart { - index, - info: self.info.clone(), - types: self.types, - bytes: self.bytes.clone(), - }) - } else { - None - } - } -} - -/// This represents a part of a storage key. -pub struct StorageKeyPart<'info> { - index: usize, - info: Arc>, - types: &'info PortableRegistry, - bytes: Arc<[u8]>, -} - -impl<'info> StorageKeyPart<'info> { - /// Get the raw bytes for this part of the storage key. - pub fn bytes(&self) -> &[u8] { - let part = &self.info[self.index]; - let hash_range = part.hash_range(); - let value_range = part.value().map(|v| v.range()).unwrap_or(core::ops::Range { - start: hash_range.end, - end: hash_range.end, - }); - let combined_range = core::ops::Range { - start: hash_range.start, - end: value_range.end, - }; - &self.bytes[combined_range] - } - - /// Get the hasher that was used to construct this part of the storage key. - pub fn hasher(&self) -> StorageHasher { - self.info[self.index].hasher() - } - - /// For keys that were produced using "concat" or "identity" hashers, the value - /// is available as a part of the key hash, allowing us to decode it into anything - /// implementing [`scale_decode::DecodeAsType`]. If the key was produced using a - /// different hasher, this will return `None`. 
- pub fn decode_as(&self) -> Result, StorageKeyError> { - let part_info = &self.info[self.index]; - let Some(value_info) = part_info.value() else { - return Ok(None); - }; - - let value_bytes = &self.bytes[value_info.range()]; - let value_ty = *value_info.ty(); - - let decoded_key_part = T::decode_as_type(&mut &*value_bytes, value_ty, self.types) - .map_err(|e| StorageKeyError::CannotDecodeValueInKey { - index: self.index, - error: e, - })?; - - Ok(Some(decoded_key_part)) - } -} diff --git a/core/src/storage/storage_key_value.rs b/core/src/storage/storage_key_value.rs deleted file mode 100644 index 74d81b1f1f..0000000000 --- a/core/src/storage/storage_key_value.rs +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use super::{Address, StorageKey, StorageValue}; -use crate::error::StorageKeyError; -use alloc::sync::Arc; -use alloc::vec::Vec; -use frame_decode::storage::StorageInfo; -use scale_info::PortableRegistry; - -/// This represents a storage key/value pair, which is typically returned from -/// iterating over values in some storage map. -#[derive(Debug)] -pub struct StorageKeyValue<'info, Addr: Address> { - key: Arc<[u8]>, - // This contains the storage information already: - value: StorageValue<'info, Addr::Value>, -} - -impl<'info, Addr: Address> StorageKeyValue<'info, Addr> { - pub(crate) fn new( - info: Arc>, - types: &'info PortableRegistry, - key_bytes: Arc<[u8]>, - value_bytes: Vec, - ) -> Self { - StorageKeyValue { - key: key_bytes, - value: StorageValue::new(info, types, value_bytes), - } - } - - /// Get the raw bytes for this storage entry's key. - pub fn key_bytes(&self) -> &[u8] { - &self.key - } - - /// Decode the key for this storage entry. This gives back a type from which we can - /// decode specific parts of the key hash (where applicable). 
- pub fn key(&'_ self) -> Result, StorageKeyError> { - StorageKey::new(&self.value.info, self.value.types, self.key.clone()) - } - - /// Return the storage value. - pub fn value(&self) -> &StorageValue<'info, Addr::Value> { - &self.value - } -} diff --git a/core/src/storage/storage_value.rs b/core/src/storage/storage_value.rs deleted file mode 100644 index 8cd50238a7..0000000000 --- a/core/src/storage/storage_value.rs +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use crate::error::StorageValueError; -use alloc::sync::Arc; -use alloc::vec::Vec; -use core::marker::PhantomData; -use frame_decode::storage::StorageInfo; -use scale_decode::DecodeAsType; -use scale_info::PortableRegistry; - -/// This represents a storage value. -#[derive(Debug)] -pub struct StorageValue<'info, Value> { - pub(crate) info: Arc>, - pub(crate) types: &'info PortableRegistry, - bytes: Vec, - marker: PhantomData, -} - -impl<'info, Value: DecodeAsType> StorageValue<'info, Value> { - pub(crate) fn new( - info: Arc>, - types: &'info PortableRegistry, - bytes: Vec, - ) -> StorageValue<'info, Value> { - StorageValue { - info, - types, - bytes, - marker: PhantomData, - } - } - - /// Get the raw bytes for this storage value. - pub fn bytes(&self) -> &[u8] { - &self.bytes - } - - /// Consume this storage value and return the raw bytes. - pub fn into_bytes(self) -> Vec { - self.bytes.to_vec() - } - - /// Decode this storage value into the provided response type. - pub fn decode(&self) -> Result { - self.decode_as::() - } - - /// Decode this storage value into an arbitrary type. 
- pub fn decode_as(&self) -> Result { - let cursor = &mut &*self.bytes; - - let value = frame_decode::storage::decode_storage_value_with_info( - cursor, - &self.info, - self.types, - T::into_visitor(), - ) - .map_err(StorageValueError::CannotDecode)?; - - if !cursor.is_empty() { - return Err(StorageValueError::LeftoverBytes { - bytes: cursor.to_vec(), - }); - } - - Ok(value) - } -} diff --git a/core/src/tx/mod.rs b/core/src/tx/mod.rs deleted file mode 100644 index 3eb6a29617..0000000000 --- a/core/src/tx/mod.rs +++ /dev/null @@ -1,458 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! Construct and sign transactions. -//! -//! # Example -//! -//! ```rust -//! use subxt_signer::sr25519::dev; -//! use subxt_macro::subxt; -//! use subxt_core::config::{PolkadotConfig, HashFor}; -//! use subxt_core::config::DefaultExtrinsicParamsBuilder as Params; -//! use subxt_core::tx; -//! use subxt_core::utils::H256; -//! use subxt_core::Metadata; -//! -//! // If we generate types without `subxt`, we need to point to `::subxt_core`: -//! #[subxt( -//! crate = "::subxt_core", -//! runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale", -//! )] -//! pub mod polkadot {} -//! -//! // Gather some other information about the chain that we'll need to construct valid extrinsics: -//! let state = tx::ClientState:: { -//! metadata: { -//! let metadata_bytes = include_bytes!("../../../artifacts/polkadot_metadata_small.scale"); -//! Metadata::decode_from(&metadata_bytes[..]).unwrap() -//! }, -//! genesis_hash: { -//! let h = "91b171bb158e2d3848fa23a9f1c25182fb8e20313b2c1eb49219da7a70ce90c3"; -//! let bytes = hex::decode(h).unwrap(); -//! H256::from_slice(&bytes) -//! }, -//! runtime_version: tx::RuntimeVersion { -//! spec_version: 9370, -//! transaction_version: 20, -//! } -//! }; -//! -//! // Now we can build a balance transfer extrinsic. -//! 
let dest = dev::bob().public_key().into(); -//! let call = polkadot::tx().balances().transfer_allow_death(dest, 10_000); -//! let params = Params::new().tip(1_000).nonce(0).build(); -//! -//! // We can validate that this lines up with the given metadata: -//! tx::validate(&call, &state.metadata).unwrap(); -//! -//! // We can build a signed transaction: -//! let signed_call = tx::create_v4_signed(&call, &state, params) -//! .unwrap() -//! .sign(&dev::alice()); -//! -//! // And log it: -//! println!("Tx: 0x{}", hex::encode(signed_call.encoded())); -//! ``` - -pub mod payload; -pub mod signer; - -use crate::Metadata; -use crate::config::{Config, ExtrinsicParams, ExtrinsicParamsEncoder, HashFor, Hasher}; -use crate::error::ExtrinsicError; -use crate::utils::Encoded; -use alloc::borrow::Cow; -use alloc::string::ToString; -use alloc::vec::Vec; -use codec::{Compact, Encode}; -use payload::Payload; -use signer::Signer as SignerT; -use sp_crypto_hashing::blake2_256; - -// Expose these here since we expect them in some calls below. -pub use crate::client::{ClientState, RuntimeVersion}; - -/// Run the validation logic against some extrinsic you'd like to submit. Returns `Ok(())` -/// if the call is valid (or if it's not possible to check since the call has no validation hash). -/// Return an error if the call was not valid or something went wrong trying to validate it (ie -/// the pallet or call in question do not exist at all). -pub fn validate(call: &Call, metadata: &Metadata) -> Result<(), ExtrinsicError> { - let Some(details) = call.validation_details() else { - return Ok(()); - }; - - let pallet_name = details.pallet_name; - let call_name = details.call_name; - - let expected_hash = metadata - .pallet_by_name(pallet_name) - .ok_or_else(|| ExtrinsicError::PalletNameNotFound(pallet_name.to_string()))? 
- .call_hash(call_name) - .ok_or_else(|| ExtrinsicError::CallNameNotFound { - pallet_name: pallet_name.to_string(), - call_name: call_name.to_string(), - })?; - - if details.hash != expected_hash { - Err(ExtrinsicError::IncompatibleCodegen) - } else { - Ok(()) - } -} - -/// Returns the suggested transaction versions to build for a given chain, or an error -/// if Subxt doesn't support any version expected by the chain. -/// -/// If the result is [`TransactionVersion::V4`], use the `v4` methods in this module. If it's -/// [`TransactionVersion::V5`], use the `v5` ones. -pub fn suggested_version(metadata: &Metadata) -> Result { - let versions = metadata.extrinsic().supported_versions(); - - if versions.contains(&4) { - Ok(TransactionVersion::V4) - } else if versions.contains(&5) { - Ok(TransactionVersion::V5) - } else { - Err(ExtrinsicError::UnsupportedVersion) - } -} - -/// The transaction versions supported by Subxt. -#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] -pub enum TransactionVersion { - /// v4 transactions (signed and unsigned transactions) - V4, - /// v5 transactions (bare and general transactions) - V5, -} - -/// Return the SCALE encoded bytes representing the call data of the transaction. -pub fn call_data( - call: &Call, - metadata: &Metadata, -) -> Result, ExtrinsicError> { - let mut bytes = Vec::new(); - call.encode_call_data_to(metadata, &mut bytes)?; - Ok(bytes) -} - -/// Creates a V4 "unsigned" transaction without submitting it. -pub fn create_v4_unsigned( - call: &Call, - metadata: &Metadata, -) -> Result, ExtrinsicError> { - create_unsigned_at_version(call, 4, metadata) -} - -/// Creates a V5 "bare" transaction without submitting it. -pub fn create_v5_bare( - call: &Call, - metadata: &Metadata, -) -> Result, ExtrinsicError> { - create_unsigned_at_version(call, 5, metadata) -} - -// Create a V4 "unsigned" transaction or V5 "bare" transaction. 
-fn create_unsigned_at_version( - call: &Call, - tx_version: u8, - metadata: &Metadata, -) -> Result, ExtrinsicError> { - // 1. Validate this call against the current node metadata if the call comes - // with a hash allowing us to do so. - validate(call, metadata)?; - - // 2. Encode extrinsic - let extrinsic = { - let mut encoded_inner = Vec::new(); - // encode the transaction version first. - tx_version.encode_to(&mut encoded_inner); - // encode call data after this byte. - call.encode_call_data_to(metadata, &mut encoded_inner)?; - // now, prefix byte length: - let len = Compact( - u32::try_from(encoded_inner.len()).expect("extrinsic size expected to be <4GB"), - ); - let mut encoded = Vec::new(); - len.encode_to(&mut encoded); - encoded.extend(encoded_inner); - encoded - }; - - // Wrap in Encoded to ensure that any more "encode" calls leave it in the right state. - Ok(Transaction::from_bytes(extrinsic)) -} - -/// Construct a v4 extrinsic, ready to be signed. -pub fn create_v4_signed( - call: &Call, - client_state: &ClientState, - params: >::Params, -) -> Result, ExtrinsicError> { - // 1. Validate this call against the current node metadata if the call comes - // with a hash allowing us to do so. - validate(call, &client_state.metadata)?; - - // 2. SCALE encode call data to bytes (pallet u8, call u8, call params). - let call_data = call_data(call, &client_state.metadata)?; - - // 3. Construct our custom additional/extra params. - let additional_and_extra_params = - >::new(client_state, params)?; - - // Return these details, ready to construct a signed extrinsic from. - Ok(PartialTransactionV4 { - call_data, - additional_and_extra_params, - }) -} - -/// Construct a v5 "general" extrinsic, ready to be signed or emitted as is. -pub fn create_v5_general( - call: &Call, - client_state: &ClientState, - params: >::Params, -) -> Result, ExtrinsicError> { - // 1. Validate this call against the current node metadata if the call comes - // with a hash allowing us to do so. 
- validate(call, &client_state.metadata)?; - - // 2. Work out which TX extension version to target based on metadata. - let tx_extensions_version = client_state - .metadata - .extrinsic() - .transaction_extension_version_to_use_for_encoding(); - - // 3. SCALE encode call data to bytes (pallet u8, call u8, call params). - let call_data = call_data(call, &client_state.metadata)?; - - // 4. Construct our custom additional/extra params. - let additional_and_extra_params = - >::new(client_state, params)?; - - // Return these details, ready to construct a signed extrinsic from. - Ok(PartialTransactionV5 { - call_data, - additional_and_extra_params, - tx_extensions_version, - }) -} - -/// A partially constructed V4 extrinsic, ready to be signed. -pub struct PartialTransactionV4 { - call_data: Vec, - additional_and_extra_params: T::ExtrinsicParams, -} - -impl PartialTransactionV4 { - /// Return the bytes representing the call data for this partially constructed - /// extrinsic. - pub fn call_data(&self) -> &[u8] { - &self.call_data - } - - // Obtain bytes representing the signer payload and run call some function - // with them. This can avoid an allocation in some cases. - fn with_signer_payload(&self, f: F) -> R - where - F: for<'a> FnOnce(Cow<'a, [u8]>) -> R, - { - let mut bytes = self.call_data.clone(); - self.additional_and_extra_params - .encode_signer_payload_value_to(&mut bytes); - self.additional_and_extra_params - .encode_implicit_to(&mut bytes); - - if bytes.len() > 256 { - f(Cow::Borrowed(&blake2_256(&bytes))) - } else { - f(Cow::Owned(bytes)) - } - } - - /// Return the V4 signer payload for this extrinsic. These are the bytes that must - /// be signed in order to produce a valid signature for the extrinsic. - pub fn signer_payload(&self) -> Vec { - self.with_signer_payload(|bytes| bytes.to_vec()) - } - - /// Convert this [`PartialTransactionV4`] into a V4 signed [`Transaction`], ready to submit. 
- /// The provided `signer` is responsible for providing the "from" address for the transaction, - /// as well as providing a signature to attach to it. - pub fn sign(&self, signer: &Signer) -> Transaction - where - Signer: SignerT, - { - // Given our signer, we can sign the payload representing this extrinsic. - let signature = self.with_signer_payload(|bytes| signer.sign(&bytes)); - // Now, use the signature and "from" address to build the extrinsic. - self.sign_with_account_and_signature(signer.account_id(), &signature) - } - - /// Convert this [`PartialTransactionV4`] into a V4 signed [`Transaction`], ready to submit. - /// The provided `address` and `signature` will be used. - pub fn sign_with_account_and_signature( - &self, - account_id: T::AccountId, - signature: &T::Signature, - ) -> Transaction { - let extrinsic = { - let mut encoded_inner = Vec::new(); - // "is signed" + transaction protocol version (4) - (0b10000000 + 4u8).encode_to(&mut encoded_inner); - // from address for signature - let address: T::Address = account_id.into(); - address.encode_to(&mut encoded_inner); - // the signature - signature.encode_to(&mut encoded_inner); - // attach custom extra params - self.additional_and_extra_params - .encode_value_to(&mut encoded_inner); - // and now, call data (remembering that it's been encoded already and just needs appending) - encoded_inner.extend(&self.call_data); - // now, prefix byte length: - let len = Compact( - u32::try_from(encoded_inner.len()).expect("extrinsic size expected to be <4GB"), - ); - let mut encoded = Vec::new(); - len.encode_to(&mut encoded); - encoded.extend(encoded_inner); - encoded - }; - - // Return an extrinsic ready to be submitted. - Transaction::from_bytes(extrinsic) - } -} - -/// A partially constructed V5 general extrinsic, ready to be signed or emitted as-is. 
-pub struct PartialTransactionV5 { - call_data: Vec, - additional_and_extra_params: T::ExtrinsicParams, - tx_extensions_version: u8, -} - -impl PartialTransactionV5 { - /// Return the bytes representing the call data for this partially constructed - /// extrinsic. - pub fn call_data(&self) -> &[u8] { - &self.call_data - } - - /// Return the V5 signer payload for this extrinsic. These are the bytes that must - /// be signed in order to produce a valid signature for the extrinsic. - pub fn signer_payload(&self) -> [u8; 32] { - let mut bytes = self.call_data.clone(); - - self.additional_and_extra_params - .encode_signer_payload_value_to(&mut bytes); - self.additional_and_extra_params - .encode_implicit_to(&mut bytes); - - blake2_256(&bytes) - } - - /// Convert this [`PartialTransactionV5`] into a V5 "general" [`Transaction`]. - /// - /// This transaction has not been explicitly signed. Use [`Self::sign`] - /// or [`Self::sign_with_account_and_signature`] if you wish to provide a - /// signature (this is usually a necessary step). - pub fn to_transaction(&self) -> Transaction { - let extrinsic = { - let mut encoded_inner = Vec::new(); - // "is general" + transaction protocol version (5) - (0b01000000 + 5u8).encode_to(&mut encoded_inner); - // Encode versions for the transaction extensions - self.tx_extensions_version.encode_to(&mut encoded_inner); - // Encode the actual transaction extensions values - self.additional_and_extra_params - .encode_value_to(&mut encoded_inner); - // and now, call data (remembering that it's been encoded already and just needs appending) - encoded_inner.extend(&self.call_data); - // now, prefix byte length: - let len = Compact( - u32::try_from(encoded_inner.len()).expect("extrinsic size expected to be <4GB"), - ); - let mut encoded = Vec::new(); - len.encode_to(&mut encoded); - encoded.extend(encoded_inner); - encoded - }; - - // Return an extrinsic ready to be submitted. 
- Transaction::from_bytes(extrinsic) - } - - /// Convert this [`PartialTransactionV5`] into a V5 "general" [`Transaction`] with a signature. - /// - /// Signing the transaction injects the signature into the transaction extension data, which is why - /// this method borrows self mutably. Signing repeatedly will override the previous signature. - pub fn sign(&mut self, signer: &Signer) -> Transaction - where - Signer: SignerT, - { - // Given our signer, we can sign the payload representing this extrinsic. - let signature = signer.sign(&self.signer_payload()); - // Now, use the signature and "from" account to build the extrinsic. - self.sign_with_account_and_signature(&signer.account_id(), &signature) - } - - /// Convert this [`PartialTransactionV5`] into a V5 "general" [`Transaction`] with a signature. - /// Prefer [`Self::sign`] if you have a [`SignerT`] instance to use. - /// - /// Signing the transaction injects the signature into the transaction extension data, which is why - /// this method borrows self mutably. Signing repeatedly will override the previous signature. - pub fn sign_with_account_and_signature( - &mut self, - account_id: &T::AccountId, - signature: &T::Signature, - ) -> Transaction { - // Inject the signature into the transaction extensions - // before constructing it. - self.additional_and_extra_params - .inject_signature(account_id, signature); - - self.to_transaction() - } -} - -/// This represents a signed transaction that's ready to be submitted. -/// Use [`Transaction::encoded()`] or [`Transaction::into_encoded()`] to -/// get the bytes for it, or [`Transaction::hash_with()`] to hash the transaction -/// given an instance of [`Config::Hasher`]. 
-pub struct Transaction { - encoded: Encoded, - marker: core::marker::PhantomData, -} - -impl Transaction { - /// Create a [`Transaction`] from some already-signed and prepared - /// extrinsic bytes, - pub fn from_bytes(tx_bytes: Vec) -> Self { - Self { - encoded: Encoded(tx_bytes), - marker: core::marker::PhantomData, - } - } - - /// Calculate and return the hash of the extrinsic, based on the provided hasher. - /// If you don't have a hasher to hand, you can construct one using the metadata - /// with `T::Hasher::new(&metadata)`. This will create a hasher suitable for the - /// current chain where possible. - pub fn hash_with(&self, hasher: T::Hasher) -> HashFor { - hasher.hash_of(&self.encoded) - } - - /// Returns the SCALE encoded extrinsic bytes. - pub fn encoded(&self) -> &[u8] { - &self.encoded.0 - } - - /// Consumes this [`Transaction`] and returns the SCALE encoded - /// extrinsic bytes. - pub fn into_encoded(self) -> Vec { - self.encoded.0 - } -} diff --git a/core/src/tx/payload.rs b/core/src/tx/payload.rs deleted file mode 100644 index e87c38a98f..0000000000 --- a/core/src/tx/payload.rs +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! This module contains the trait and types used to represent -//! transactions that can be submitted. - -use crate::Metadata; -use crate::error::ExtrinsicError; -use alloc::borrow::Cow; -use alloc::boxed::Box; -use alloc::string::{String, ToString}; - -use alloc::vec::Vec; -use codec::Encode; -use scale_encode::EncodeAsFields; -use scale_value::{Composite, Value, ValueDef, Variant}; - -/// This represents a transaction payload that can be submitted -/// to a node. -pub trait Payload { - /// Encode call data to the provided output. - fn encode_call_data_to( - &self, - metadata: &Metadata, - out: &mut Vec, - ) -> Result<(), ExtrinsicError>; - - /// Encode call data and return the output. 
This is a convenience - /// wrapper around [`Payload::encode_call_data_to`]. - fn encode_call_data(&self, metadata: &Metadata) -> Result, ExtrinsicError> { - let mut v = Vec::new(); - self.encode_call_data_to(metadata, &mut v)?; - Ok(v) - } - - /// Returns the details needed to validate the call, which - /// include a statically generated hash, the pallet name, - /// and the call name. - fn validation_details(&self) -> Option> { - None - } -} - -macro_rules! boxed_payload { - ($ty:path) => { - impl Payload for $ty { - fn encode_call_data_to( - &self, - metadata: &Metadata, - out: &mut Vec, - ) -> Result<(), ExtrinsicError> { - self.as_ref().encode_call_data_to(metadata, out) - } - fn encode_call_data(&self, metadata: &Metadata) -> Result, ExtrinsicError> { - self.as_ref().encode_call_data(metadata) - } - fn validation_details(&self) -> Option> { - self.as_ref().validation_details() - } - } - }; -} - -boxed_payload!(Box); -#[cfg(feature = "std")] -boxed_payload!(std::sync::Arc); -#[cfg(feature = "std")] -boxed_payload!(std::rc::Rc); - -/// Details required to validate the shape of a transaction payload against some metadata. -pub struct ValidationDetails<'a> { - /// The pallet name. - pub pallet_name: &'a str, - /// The call name. - pub call_name: &'a str, - /// A hash (this is generated at compile time in our codegen) - /// to compare against the runtime code. - pub hash: [u8; 32], -} - -/// A transaction payload containing some generic `CallData`. -#[derive(Clone, Debug, Eq, Ord, PartialEq, PartialOrd)] -pub struct DefaultPayload { - pallet_name: Cow<'static, str>, - call_name: Cow<'static, str>, - call_data: CallData, - validation_hash: Option<[u8; 32]>, -} - -/// The payload type used by static codegen. -pub type StaticPayload = DefaultPayload; -/// The type of a payload typically used for dynamic transaction payloads. -pub type DynamicPayload = DefaultPayload>; - -impl DefaultPayload { - /// Create a new [`DefaultPayload`]. 
- pub fn new( - pallet_name: impl Into, - call_name: impl Into, - call_data: CallData, - ) -> Self { - DefaultPayload { - pallet_name: Cow::Owned(pallet_name.into()), - call_name: Cow::Owned(call_name.into()), - call_data, - validation_hash: None, - } - } - - /// Create a new [`DefaultPayload`] using static strings for the pallet and call name. - /// This is only expected to be used from codegen. - #[doc(hidden)] - pub fn new_static( - pallet_name: &'static str, - call_name: &'static str, - call_data: CallData, - validation_hash: [u8; 32], - ) -> Self { - DefaultPayload { - pallet_name: Cow::Borrowed(pallet_name), - call_name: Cow::Borrowed(call_name), - call_data, - validation_hash: Some(validation_hash), - } - } - - /// Do not validate this call prior to submitting it. - pub fn unvalidated(self) -> Self { - Self { - validation_hash: None, - ..self - } - } - - /// Returns the call data. - pub fn call_data(&self) -> &CallData { - &self.call_data - } - - /// Returns the pallet name. - pub fn pallet_name(&self) -> &str { - &self.pallet_name - } - - /// Returns the call name. - pub fn call_name(&self) -> &str { - &self.call_name - } -} - -impl DefaultPayload> { - /// Convert the dynamic `Composite` payload into a [`Value`]. - /// This is useful if you want to use this as an argument for a - /// larger dynamic call that wants to use this as a nested call. 
- pub fn into_value(self) -> Value<()> { - let call = Value { - context: (), - value: ValueDef::Variant(Variant { - name: self.call_name.into_owned(), - values: self.call_data, - }), - }; - - Value::unnamed_variant(self.pallet_name, [call]) - } -} - -impl Payload for DefaultPayload { - fn encode_call_data_to( - &self, - metadata: &Metadata, - out: &mut Vec, - ) -> Result<(), ExtrinsicError> { - let pallet = metadata - .pallet_by_name(&self.pallet_name) - .ok_or_else(|| ExtrinsicError::PalletNameNotFound(self.pallet_name.to_string()))?; - let call = pallet - .call_variant_by_name(&self.call_name) - .ok_or_else(|| ExtrinsicError::CallNameNotFound { - pallet_name: pallet.name().to_string(), - call_name: self.call_name.to_string(), - })?; - - let pallet_index = pallet.call_index(); - let call_index = call.index; - - pallet_index.encode_to(out); - call_index.encode_to(out); - - let mut fields = call - .fields - .iter() - .map(|f| scale_encode::Field::new(f.ty.id, f.name.as_deref())); - - self.call_data - .encode_as_fields_to(&mut fields, metadata.types(), out) - .map_err(ExtrinsicError::CannotEncodeCallData)?; - Ok(()) - } - - fn validation_details(&self) -> Option> { - self.validation_hash.map(|hash| ValidationDetails { - pallet_name: &self.pallet_name, - call_name: &self.call_name, - hash, - }) - } -} - -/// Construct a transaction at runtime; essentially an alias to [`DefaultPayload::new()`] -/// which provides a [`Composite`] value for the call data. 
-pub fn dynamic( - pallet_name: impl Into, - call_name: impl Into, - call_data: impl Into>, -) -> DynamicPayload { - DefaultPayload::new(pallet_name, call_name, call_data.into()) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::Metadata; - use codec::Decode; - use scale_value::Composite; - - fn test_metadata() -> Metadata { - let metadata_bytes = include_bytes!("../../../artifacts/polkadot_metadata_small.scale"); - Metadata::decode(&mut &metadata_bytes[..]).expect("Valid metadata") - } - - #[test] - fn encode_call_with_incompatible_types_returns_error() { - let metadata = test_metadata(); - - let incompatible_data = Composite::named([ - ("dest", scale_value::Value::bool(true)), // Boolean instead of MultiAddress - ("value", scale_value::Value::string("not_a_number")), // String instead of u128 - ]); - - let payload = DefaultPayload::new("Balances", "transfer_allow_death", incompatible_data); - - let mut out = Vec::new(); - let result = payload.encode_call_data_to(&metadata, &mut out); - - assert!( - result.is_err(), - "Expected error when encoding with incompatible types" - ); - } - - #[test] - fn encode_call_with_valid_data_succeeds() { - let metadata = test_metadata(); - - // Create a valid payload to ensure our error handling doesn't break valid cases - // For MultiAddress, we'll use the Id variant with a 32-byte account - let valid_address = - scale_value::Value::unnamed_variant("Id", [scale_value::Value::from_bytes([0u8; 32])]); - - let valid_data = Composite::named([ - ("dest", valid_address), - ("value", scale_value::Value::u128(1000)), - ]); - - let payload = DefaultPayload::new("Balances", "transfer_allow_death", valid_data); - - // This should succeed - let mut out = Vec::new(); - let result = payload.encode_call_data_to(&metadata, &mut out); - - assert!( - result.is_ok(), - "Expected success when encoding with valid data" - ); - assert!(!out.is_empty(), "Expected encoded output to be non-empty"); - } -} diff --git a/core/src/tx/signer.rs 
b/core/src/tx/signer.rs deleted file mode 100644 index 82dca378d1..0000000000 --- a/core/src/tx/signer.rs +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! A library to **sub**mit e**xt**rinsics to a -//! [substrate](https://github.com/paritytech/substrate) node via RPC. - -use crate::Config; - -/// Signing transactions requires a [`Signer`]. This is responsible for -/// providing the "from" account that the transaction is being signed by, -/// as well as actually signing a SCALE encoded payload. -pub trait Signer { - /// Return the "from" account ID. - fn account_id(&self) -> T::AccountId; - - /// Takes a signer payload for an extrinsic, and returns a signature based on it. - /// - /// Some signers may fail, for instance because the hardware on which the keys are located has - /// refused the operation. - fn sign(&self, signer_payload: &[u8]) -> T::Signature; -} diff --git a/core/src/utils/account_id20.rs b/core/src/utils/account_id20.rs deleted file mode 100644 index 136e217a51..0000000000 --- a/core/src/utils/account_id20.rs +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! `AccountId20` is a representation of Ethereum address derived from hashing the public key. - -use alloc::format; -use alloc::string::String; -use codec::{Decode, Encode}; -use keccak_hash::keccak; -use serde::{Deserialize, Serialize}; -use thiserror::Error as DeriveError; - -#[derive( - Copy, - Clone, - Eq, - PartialEq, - Ord, - PartialOrd, - Encode, - Decode, - Debug, - scale_encode::EncodeAsType, - scale_decode::DecodeAsType, - scale_info::TypeInfo, -)] -/// Ethereum-compatible `AccountId`. -pub struct AccountId20(pub [u8; 20]); - -impl AsRef<[u8]> for AccountId20 { - fn as_ref(&self) -> &[u8] { - &self.0[..] 
- } -} - -impl AsRef<[u8; 20]> for AccountId20 { - fn as_ref(&self) -> &[u8; 20] { - &self.0 - } -} - -impl From<[u8; 20]> for AccountId20 { - fn from(x: [u8; 20]) -> Self { - AccountId20(x) - } -} - -impl AccountId20 { - /// Convert to a public key hash - pub fn checksum(&self) -> String { - let hex_address = hex::encode(self.0); - let hash = keccak(hex_address.as_bytes()); - - let mut checksum_address = String::with_capacity(42); - checksum_address.push_str("0x"); - - for (i, ch) in hex_address.chars().enumerate() { - // Get the corresponding nibble from the hash - let nibble = (hash[i / 2] >> (if i % 2 == 0 { 4 } else { 0 })) & 0xf; - - if nibble >= 8 { - checksum_address.push(ch.to_ascii_uppercase()); - } else { - checksum_address.push(ch); - } - } - - checksum_address - } -} - -/// An error obtained from trying to interpret a hex encoded string into an AccountId20 -#[derive(Clone, Copy, Eq, PartialEq, Debug, DeriveError)] -#[allow(missing_docs)] -pub enum FromChecksumError { - #[error("Length is bad")] - BadLength, - #[error("Invalid checksum")] - InvalidChecksum, - #[error("Invalid checksum prefix byte.")] - InvalidPrefix, -} - -impl Serialize for AccountId20 { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - serializer.serialize_str(&self.checksum()) - } -} - -impl<'de> Deserialize<'de> for AccountId20 { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - String::deserialize(deserializer)? 
- .parse::() - .map_err(|e| serde::de::Error::custom(format!("{e:?}"))) - } -} - -impl core::fmt::Display for AccountId20 { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!(f, "{}", self.checksum()) - } -} - -impl core::str::FromStr for AccountId20 { - type Err = FromChecksumError; - fn from_str(s: &str) -> Result { - if s.len() != 42 { - return Err(FromChecksumError::BadLength); - } - if !s.starts_with("0x") { - return Err(FromChecksumError::InvalidPrefix); - } - hex::decode(&s.as_bytes()[2..]) - .map_err(|_| FromChecksumError::InvalidChecksum)? - .try_into() - .map(AccountId20) - .map_err(|_| FromChecksumError::BadLength) - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn deserialisation() { - let key_hashes = vec![ - "0xf24FF3a9CF04c71Dbc94D0b566f7A27B94566cac", - "0x3Cd0A705a2DC65e5b1E1205896BaA2be8A07c6e0", - "0x798d4Ba9baf0064Ec19eB4F0a1a45785ae9D6DFc", - "0x773539d4Ac0e786233D90A233654ccEE26a613D9", - "0xFf64d3F6efE2317EE2807d223a0Bdc4c0c49dfDB", - "0xC0F0f4ab324C46e55D02D0033343B4Be8A55532d", - ]; - - for key_hash in key_hashes { - let parsed: AccountId20 = key_hash.parse().expect("Failed to parse"); - - let encoded = parsed.checksum(); - - // `encoded` should be equal to the initial key_hash - assert_eq!(encoded, key_hash); - } - } -} diff --git a/core/src/utils/bits.rs b/core/src/utils/bits.rs deleted file mode 100644 index 93ba0f4af3..0000000000 --- a/core/src/utils/bits.rs +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! Generic `scale_bits` over `bitvec`-like `BitOrder` and `BitFormat` types. 
- -use alloc::vec; -use alloc::vec::Vec; -use codec::{Compact, Input}; -use core::marker::PhantomData; -use scale_bits::{ - Bits, - scale::format::{Format, OrderFormat, StoreFormat}, -}; -use scale_decode::{IntoVisitor, TypeResolver}; - -/// Associates `bitvec::store::BitStore` trait with corresponding, type-erased `scale_bits::StoreFormat` enum. -/// -/// Used to decode bit sequences by providing `scale_bits::StoreFormat` using -/// `bitvec`-like type type parameters. -pub trait BitStore { - /// Corresponding `scale_bits::StoreFormat` value. - const FORMAT: StoreFormat; - /// Number of bits that the backing store types holds. - const BITS: u32; -} -macro_rules! impl_store { - ($ty:ident, $wrapped:ty) => { - impl BitStore for $wrapped { - const FORMAT: StoreFormat = StoreFormat::$ty; - const BITS: u32 = <$wrapped>::BITS; - } - }; -} -impl_store!(U8, u8); -impl_store!(U16, u16); -impl_store!(U32, u32); -impl_store!(U64, u64); - -/// Associates `bitvec::order::BitOrder` trait with corresponding, type-erased `scale_bits::OrderFormat` enum. -/// -/// Used to decode bit sequences in runtime by providing `scale_bits::OrderFormat` using -/// `bitvec`-like type type parameters. -pub trait BitOrder { - /// Corresponding `scale_bits::OrderFormat` value. - const FORMAT: OrderFormat; -} -macro_rules! impl_order { - ($ty:ident) => { - #[doc = concat!("Type-level value that corresponds to `scale_bits::OrderFormat::", stringify!($ty), "` at run-time")] - #[doc = concat!(" and `bitvec::order::BitOrder::", stringify!($ty), "` at the type level.")] - #[derive(Clone, Debug, PartialEq, Eq)] - pub enum $ty {} - impl BitOrder for $ty { - const FORMAT: OrderFormat = OrderFormat::$ty; - } - }; -} -impl_order!(Lsb0); -impl_order!(Msb0); - -/// Constructs a run-time format parameters based on the corresponding type-level parameters. 
-fn bit_format() -> Format { - Format { - order: Order::FORMAT, - store: Store::FORMAT, - } -} - -/// `scale_bits::Bits` generic over the bit store (`u8`/`u16`/`u32`/`u64`) and bit order (LSB, MSB) -/// used for SCALE encoding/decoding. Uses `scale_bits::Bits`-default `u8` and LSB format underneath. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct DecodedBits { - bits: Bits, - _marker: PhantomData<(Store, Order)>, -} - -impl DecodedBits { - /// Extracts the underlying `scale_bits::Bits` value. - pub fn into_bits(self) -> Bits { - self.bits - } - - /// References the underlying `scale_bits::Bits` value. - pub fn as_bits(&self) -> &Bits { - &self.bits - } -} - -impl core::iter::FromIterator for DecodedBits { - fn from_iter>(iter: T) -> Self { - DecodedBits { - bits: Bits::from_iter(iter), - _marker: PhantomData, - } - } -} - -impl codec::Decode for DecodedBits { - fn decode(input: &mut I) -> Result { - /// Equivalent of `BitSlice::MAX_BITS` on 32bit machine. - const ARCH32BIT_BITSLICE_MAX_BITS: u32 = 0x1fff_ffff; - - let Compact(bits) = >::decode(input)?; - // Otherwise it is impossible to store it on 32bit machine. - if bits > ARCH32BIT_BITSLICE_MAX_BITS { - return Err("Attempt to decode a BitVec with too many bits".into()); - } - // NOTE: Replace with `bits.div_ceil(Store::BITS)` if `int_roundings` is stabilised - let elements = (bits / Store::BITS) + u32::from(bits % Store::BITS != 0); - let bytes_in_elem = Store::BITS.saturating_div(u8::BITS); - let bytes_needed = (elements * bytes_in_elem) as usize; - - // NOTE: We could reduce allocations if it would be possible to directly - // decode from an `Input` type using a custom format (rather than default ) - // for the `Bits` type. 
- let mut storage = codec::Encode::encode(&Compact(bits)); - let prefix_len = storage.len(); - storage.reserve_exact(bytes_needed); - storage.extend(vec![0; bytes_needed]); - input.read(&mut storage[prefix_len..])?; - - let decoder = scale_bits::decode_using_format_from(&storage, bit_format::())?; - let bits = decoder.collect::, _>>()?; - let bits = Bits::from_iter(bits); - - Ok(DecodedBits { - bits, - _marker: PhantomData, - }) - } -} - -impl codec::Encode for DecodedBits { - fn size_hint(&self) -> usize { - self.bits.size_hint() - } - - fn encoded_size(&self) -> usize { - self.bits.encoded_size() - } - - fn encode(&self) -> Vec { - scale_bits::encode_using_format(self.bits.iter(), bit_format::()) - } -} - -#[doc(hidden)] -pub struct DecodedBitsVisitor(core::marker::PhantomData<(S, O, R)>); - -impl scale_decode::Visitor for DecodedBitsVisitor { - type Value<'scale, 'info> = DecodedBits; - type Error = scale_decode::Error; - type TypeResolver = R; - - fn unchecked_decode_as_type<'scale, 'info>( - self, - input: &mut &'scale [u8], - type_id: R::TypeId, - types: &'info R, - ) -> scale_decode::visitor::DecodeAsTypeResult< - Self, - Result, Self::Error>, - > { - let res = - scale_decode::visitor::decode_with_visitor(input, type_id, types, Bits::into_visitor()) - .map(|bits| DecodedBits { - bits, - _marker: PhantomData, - }); - scale_decode::visitor::DecodeAsTypeResult::Decoded(res) - } -} -impl scale_decode::IntoVisitor for DecodedBits { - type AnyVisitor = DecodedBitsVisitor; - fn into_visitor() -> DecodedBitsVisitor { - DecodedBitsVisitor(PhantomData) - } -} - -impl scale_encode::EncodeAsType for DecodedBits { - fn encode_as_type_to( - &self, - type_id: R::TypeId, - types: &R, - out: &mut Vec, - ) -> Result<(), scale_encode::Error> { - self.bits.encode_as_type_to(type_id, types, out) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - use core::fmt::Debug; - - use bitvec::vec::BitVec; - use codec::Decode as _; - - // NOTE: We don't use `bitvec::order` types in 
our implementation, since we - // don't want to depend on `bitvec`. Rather than reimplementing the unsafe - // trait on our types here for testing purposes, we simply convert and - // delegate to `bitvec`'s own types. - trait ToBitVec { - type Order: bitvec::order::BitOrder; - } - impl ToBitVec for Lsb0 { - type Order = bitvec::order::Lsb0; - } - impl ToBitVec for Msb0 { - type Order = bitvec::order::Msb0; - } - - fn scales_like_bitvec_and_roundtrips< - 'a, - Store: BitStore + bitvec::store::BitStore + PartialEq, - Order: BitOrder + ToBitVec + Debug + PartialEq, - >( - input: impl IntoIterator, - ) where - BitVec::Order>: codec::Encode + codec::Decode, - { - let input: Vec<_> = input.into_iter().copied().collect(); - - let decoded_bits = DecodedBits::::from_iter(input.clone()); - let bitvec = BitVec::::Order>::from_iter(input); - - let decoded_bits_encoded = codec::Encode::encode(&decoded_bits); - let bitvec_encoded = codec::Encode::encode(&bitvec); - assert_eq!(decoded_bits_encoded, bitvec_encoded); - - let decoded_bits_decoded = - DecodedBits::::decode(&mut &decoded_bits_encoded[..]) - .expect("SCALE-encoding DecodedBits to roundtrip"); - let bitvec_decoded = - BitVec::::Order>::decode(&mut &bitvec_encoded[..]) - .expect("SCALE-encoding BitVec to roundtrip"); - assert_eq!(decoded_bits, decoded_bits_decoded); - assert_eq!(bitvec, bitvec_decoded); - } - - #[test] - fn decoded_bitvec_scales_and_roundtrips() { - let test_cases = [ - vec![], - vec![true], - vec![false], - vec![true, false, true], - vec![true, false, true, false, false, false, false, false, true], - [vec![true; 5], vec![false; 5], vec![true; 1], vec![false; 3]].concat(), - [vec![true; 9], vec![false; 9], vec![true; 9], vec![false; 9]].concat(), - ]; - - for test_case in &test_cases { - scales_like_bitvec_and_roundtrips::(test_case); - scales_like_bitvec_and_roundtrips::(test_case); - scales_like_bitvec_and_roundtrips::(test_case); - scales_like_bitvec_and_roundtrips::(test_case); - 
scales_like_bitvec_and_roundtrips::(test_case); - scales_like_bitvec_and_roundtrips::(test_case); - scales_like_bitvec_and_roundtrips::(test_case); - scales_like_bitvec_and_roundtrips::(test_case); - } - } -} diff --git a/core/src/utils/era.rs b/core/src/utils/era.rs deleted file mode 100644 index 43875fcdab..0000000000 --- a/core/src/utils/era.rs +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use alloc::{format, vec::Vec}; -use codec::{Decode, Encode}; -use scale_decode::{ - IntoVisitor, TypeResolver, Visitor, - ext::scale_type_resolver, - visitor::{TypeIdFor, types::Composite, types::Variant}, -}; -use scale_encode::EncodeAsType; - -// Dev note: This and related bits taken from `sp_runtime::generic::Era` -/// An era to describe the longevity of a transaction. -#[derive( - PartialEq, - Default, - Eq, - Clone, - Copy, - Debug, - serde::Serialize, - serde::Deserialize, - scale_info::TypeInfo, -)] -pub enum Era { - /// The transaction is valid forever. The genesis hash must be present in the signed content. - #[default] - Immortal, - - /// The transaction will expire. Use [`Era::mortal`] to construct this with correct values. - /// - /// When used on `FRAME`-based runtimes, `period` cannot exceed `BlockHashCount` parameter - /// of `system` module. - Mortal { - /// The number of blocks that the tx will be valid for after the checkpoint block - /// hash found in the signer payload. - period: u64, - /// The phase in the period that this transaction's lifetime begins (and, importantly, - /// implies which block hash is included in the signature material). If the `period` is - /// greater than 1 << 12, then it will be a factor of the times greater than 1<<12 that - /// `period` is. - phase: u64, - }, -} - -// E.g. 
with period == 4: -// 0 10 20 30 40 -// 0123456789012345678901234567890123456789012 -// |...| -// authored -/ \- expiry -// phase = 1 -// n = Q(current - phase, period) + phase -impl Era { - /// Create a new era based on a period (which should be a power of two between 4 and 65536 - /// inclusive) and a block number on which it should start (or, for long periods, be shortly - /// after the start). - /// - /// If using `Era` in the context of `FRAME` runtime, make sure that `period` - /// does not exceed `BlockHashCount` parameter passed to `system` module, since that - /// prunes old blocks and renders transactions immediately invalid. - pub fn mortal(period: u64, current: u64) -> Self { - let period = period - .checked_next_power_of_two() - .unwrap_or(1 << 16) - .clamp(4, 1 << 16); - let phase = current % period; - let quantize_factor = (period >> 12).max(1); - let quantized_phase = phase / quantize_factor * quantize_factor; - - Self::Mortal { - period, - phase: quantized_phase, - } - } -} - -// Both copied from `sp_runtime::generic::Era`; this is the wire interface and so -// it's really the most important bit here. -impl codec::Encode for Era { - fn encode_to(&self, output: &mut T) { - match self { - Self::Immortal => output.push_byte(0), - Self::Mortal { period, phase } => { - let quantize_factor = (*period >> 12).max(1); - let encoded = (period.trailing_zeros() - 1).clamp(1, 15) as u16 - | ((phase / quantize_factor) << 4) as u16; - encoded.encode_to(output); - } - } - } -} -impl codec::Decode for Era { - fn decode(input: &mut I) -> Result { - let first = input.read_byte()?; - if first == 0 { - Ok(Self::Immortal) - } else { - let encoded = first as u64 + ((input.read_byte()? 
as u64) << 8); - let period = 2 << (encoded % (1 << 4)); - let quantize_factor = (period >> 12).max(1); - let phase = (encoded >> 4) * quantize_factor; - if period >= 4 && phase < period { - Ok(Self::Mortal { period, phase }) - } else { - Err("Invalid period and phase".into()) - } - } - } -} - -/// Define manually how to encode an Era given some type information. Here we -/// basically check that the type we're targeting is called "Era" and then codec::Encode. -impl EncodeAsType for Era { - fn encode_as_type_to( - &self, - type_id: R::TypeId, - types: &R, - out: &mut Vec, - ) -> Result<(), scale_encode::Error> { - // Visit the type to check that it is an Era. This is only a rough check. - let visitor = scale_type_resolver::visitor::new((), |_, _| false) - .visit_variant(|_, path, _variants| path.last() == Some("Era")); - - let is_era = types - .resolve_type(type_id.clone(), visitor) - .unwrap_or_default(); - if !is_era { - return Err(scale_encode::Error::custom_string(format!( - "Type {type_id:?} is not a valid Era type; expecting either Immortal or MortalX variant" - ))); - } - - // if the type looks valid then just scale encode our Era. - self.encode_to(out); - Ok(()) - } -} - -/// Define manually how to decode an Era given some type information. Here we check that the -/// variant we're decoding is one of the expected Era variants, and that the field is correct if so, -/// ensuring that this will fail if trying to decode something that isn't an Era. -pub struct EraVisitor(core::marker::PhantomData); - -impl IntoVisitor for Era { - type AnyVisitor = EraVisitor; - fn into_visitor() -> Self::AnyVisitor { - EraVisitor(core::marker::PhantomData) - } -} - -impl Visitor for EraVisitor { - type Value<'scale, 'resolver> = Era; - type Error = scale_decode::Error; - type TypeResolver = R; - - // Unwrap any newtype wrappers around the era, eg the CheckMortality extension (which actually - // has 2 fields, but scale_info seems to automatically ignore the PhantomData field). 
This - // allows us to decode directly from CheckMortality into Era. - fn visit_composite<'scale, 'resolver>( - self, - value: &mut Composite<'scale, 'resolver, Self::TypeResolver>, - _type_id: TypeIdFor, - ) -> Result, Self::Error> { - if value.remaining() != 1 { - return Err(scale_decode::Error::custom_string(format!( - "Expected any wrapper around Era to have exactly one field, but got {} fields", - value.remaining() - ))); - } - - value - .decode_item(self) - .expect("1 field expected; checked above.") - } - - fn visit_variant<'scale, 'resolver>( - self, - value: &mut Variant<'scale, 'resolver, Self::TypeResolver>, - _type_id: TypeIdFor, - ) -> Result, Self::Error> { - let variant = value.name(); - - // If the variant is immortal, we know the outcome. - if variant == "Immortal" { - return Ok(Era::Immortal); - } - - // Otherwise, we expect a variant Mortal1..Mortal255 where the number - // here is the first byte, and the second byte is conceptually a field of this variant. - // This weird encoding is because the Era is compressed to just 1 byte if immortal and - // just 2 bytes if mortal. - // - // Note: We _could_ just assume we'll have 2 bytes to work with and decode the era directly, - // but checking the variant names ensures that the thing we think is an Era actually _is_ - // one, based on the type info for it. - let first_byte = variant - .strip_prefix("Mortal") - .and_then(|s| s.parse::().ok()) - .ok_or_else(|| { - scale_decode::Error::custom_string(format!( - "Expected MortalX variant, but got {variant}" - )) - })?; - - // We need 1 field in the MortalN variant containing the second byte. 
- let mortal_fields = value.fields(); - if mortal_fields.remaining() != 1 { - return Err(scale_decode::Error::custom_string(format!( - "Expected Mortal{} to have one u8 field, but got {} fields", - first_byte, - mortal_fields.remaining() - ))); - } - - let second_byte = mortal_fields - .decode_item(u8::into_visitor()) - .expect("At least one field should exist; checked above.") - .map_err(|e| { - scale_decode::Error::custom_string(format!( - "Expected mortal variant field to be u8, but: {e}" - )) - })?; - - // Now that we have both bytes we can decode them into the era using - // the same logic as the codec::Decode impl does. - Era::decode(&mut &[first_byte, second_byte][..]).map_err(|e| { - scale_decode::Error::custom_string(format!( - "Failed to codec::Decode Era from Mortal bytes: {e}" - )) - }) - } -} diff --git a/core/src/utils/mod.rs b/core/src/utils/mod.rs deleted file mode 100644 index e739f9f6c0..0000000000 --- a/core/src/utils/mod.rs +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! Miscellaneous utility helpers. 
- -mod account_id; -mod account_id20; -pub mod bits; -mod era; -mod multi_address; -mod multi_signature; -mod static_type; -mod unchecked_extrinsic; -mod wrapper_opaque; -mod yesnomaybe; - -use alloc::borrow::ToOwned; -use alloc::format; -use alloc::string::String; -use alloc::vec::Vec; -use codec::{Compact, Decode, Encode}; -use derive_where::derive_where; - -pub use account_id::AccountId32; -pub use account_id20::AccountId20; -pub use era::Era; -pub use multi_address::MultiAddress; -pub use multi_signature::MultiSignature; -pub use primitive_types::{H160, H256, H512}; -pub use static_type::Static; -pub use unchecked_extrinsic::UncheckedExtrinsic; -pub use wrapper_opaque::WrapperKeepOpaque; -pub use yesnomaybe::{Maybe, No, NoMaybe, Yes, YesMaybe, YesNo}; - -/// Wraps an already encoded byte vector, prevents being encoded as a raw byte vector as part of -/// the transaction payload -#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] -pub struct Encoded(pub Vec); - -impl codec::Encode for Encoded { - fn encode(&self) -> Vec { - self.0.to_owned() - } -} - -/// Decodes a compact encoded value from the beginning of the provided bytes, -/// returning the value and any remaining bytes. -pub fn strip_compact_prefix(bytes: &[u8]) -> Result<(u64, &[u8]), codec::Error> { - let cursor = &mut &*bytes; - let val = >::decode(cursor)?; - Ok((val.0, *cursor)) -} - -/// A version of [`core::marker::PhantomData`] that is also Send and Sync (which is fine -/// because regardless of the generic param, it is always possible to Send + Sync this -/// 0 size type). 
-#[derive(Encode, Decode, scale_info::TypeInfo)] -#[derive_where(Clone, PartialEq, Debug, Eq, Default, Hash)] -#[scale_info(skip_type_params(T))] -#[doc(hidden)] -pub struct PhantomDataSendSync(core::marker::PhantomData); - -impl PhantomDataSendSync { - pub fn new() -> Self { - Self(core::marker::PhantomData) - } -} - -unsafe impl Send for PhantomDataSendSync {} -unsafe impl Sync for PhantomDataSendSync {} - -/// This represents a key-value collection and is SCALE compatible -/// with collections like BTreeMap. This has the same type params -/// as `BTreeMap` which allows us to easily swap the two during codegen. -pub type KeyedVec = Vec<(K, V)>; - -/// A quick helper to encode some bytes to hex. -pub fn to_hex(bytes: impl AsRef<[u8]>) -> String { - format!("0x{}", hex::encode(bytes.as_ref())) -} diff --git a/core/src/utils/multi_address.rs b/core/src/utils/multi_address.rs deleted file mode 100644 index 95506bc483..0000000000 --- a/core/src/utils/multi_address.rs +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! The "default" Substrate/Polkadot Address type. This is used in codegen, as well as signing related bits. -//! This doesn't contain much functionality itself, but is easy to convert to/from an `sp_runtime::MultiAddress` -//! for instance, to gain functionality without forcing a dependency on Substrate crates here. - -use alloc::vec::Vec; -use codec::{Decode, Encode}; - -/// A multi-format address wrapper for on-chain accounts. This is a simplified version of Substrate's -/// `sp_runtime::MultiAddress`. -#[derive( - Clone, - Eq, - PartialEq, - Ord, - PartialOrd, - Encode, - Decode, - Debug, - scale_encode::EncodeAsType, - scale_decode::DecodeAsType, - scale_info::TypeInfo, -)] -pub enum MultiAddress { - /// It's an account ID (pubkey). - Id(AccountId), - /// It's an account index. 
- Index(#[codec(compact)] AccountIndex), - /// It's some arbitrary raw bytes. - Raw(Vec), - /// It's a 32 byte representation. - Address32([u8; 32]), - /// Its a 20 byte representation. - Address20([u8; 20]), -} - -impl From for MultiAddress { - fn from(a: AccountId) -> Self { - Self::Id(a) - } -} diff --git a/core/src/utils/static_type.rs b/core/src/utils/static_type.rs deleted file mode 100644 index e27b9c5bc0..0000000000 --- a/core/src/utils/static_type.rs +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use codec::{Decode, Encode}; -use scale_decode::{IntoVisitor, TypeResolver, Visitor, visitor::DecodeAsTypeResult}; -use scale_encode::EncodeAsType; - -use alloc::vec::Vec; - -/// If the type inside this implements [`Encode`], this will implement [`scale_encode::EncodeAsType`]. -/// If the type inside this implements [`Decode`], this will implement [`scale_decode::DecodeAsType`]. -/// -/// In either direction, we ignore any type information and just attempt to encode/decode statically -/// via the [`Encode`] and [`Decode`] implementations. This can be useful as an adapter for types which -/// do not implement [`scale_encode::EncodeAsType`] and [`scale_decode::DecodeAsType`] themselves, but -/// it's best to avoid using it where possible as it will not take into account any type information, -/// and is thus more likely to encode or decode incorrectly. 
-#[derive(Debug, Encode, Decode, PartialEq, Eq, Clone, PartialOrd, Ord, Hash)] -pub struct Static(pub T); - -impl EncodeAsType for Static { - fn encode_as_type_to( - &self, - _type_id: R::TypeId, - _types: &R, - out: &mut Vec, - ) -> Result<(), scale_encode::Error> { - self.0.encode_to(out); - Ok(()) - } -} - -pub struct StaticDecodeAsTypeVisitor(core::marker::PhantomData<(T, R)>); - -impl Visitor for StaticDecodeAsTypeVisitor { - type Value<'scale, 'info> = Static; - type Error = scale_decode::Error; - type TypeResolver = R; - - fn unchecked_decode_as_type<'scale, 'info>( - self, - input: &mut &'scale [u8], - _type_id: R::TypeId, - _types: &'info R, - ) -> DecodeAsTypeResult, Self::Error>> { - use scale_decode::{Error, visitor::DecodeError}; - let decoded = T::decode(input) - .map(Static) - .map_err(|e| Error::new(DecodeError::CodecError(e).into())); - DecodeAsTypeResult::Decoded(decoded) - } -} - -impl IntoVisitor for Static { - type AnyVisitor = StaticDecodeAsTypeVisitor; - fn into_visitor() -> StaticDecodeAsTypeVisitor { - StaticDecodeAsTypeVisitor(core::marker::PhantomData) - } -} - -// Make it easy to convert types into Static where required. -impl From for Static { - fn from(value: T) -> Self { - Static(value) - } -} - -// Static is just a marker type and should be as transparent as possible: -impl core::ops::Deref for Static { - type Target = T; - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl core::ops::DerefMut for Static { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} diff --git a/core/src/utils/unchecked_extrinsic.rs b/core/src/utils/unchecked_extrinsic.rs deleted file mode 100644 index caafe20750..0000000000 --- a/core/src/utils/unchecked_extrinsic.rs +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! The "default" Substrate/Polkadot UncheckedExtrinsic. -//! 
This is used in codegen for runtime API calls. -//! -//! The inner bytes represent the encoded extrinsic expected by the -//! runtime APIs. Deriving `EncodeAsType` would lead to the inner -//! bytes to be re-encoded (length prefixed). - -use core::marker::PhantomData; - -use codec::{Decode, Encode}; -use scale_decode::{DecodeAsType, IntoVisitor, TypeResolver, Visitor, visitor::DecodeAsTypeResult}; - -use super::{Encoded, Static}; -use alloc::vec::Vec; - -/// The unchecked extrinsic from substrate. -#[derive(Clone, Debug, Eq, PartialEq, Encode)] -pub struct UncheckedExtrinsic( - Static, - #[codec(skip)] PhantomData<(Address, Call, Signature, Extra)>, -); - -impl UncheckedExtrinsic { - /// Construct a new [`UncheckedExtrinsic`]. - pub fn new(bytes: Vec) -> Self { - Self(Static(Encoded(bytes)), PhantomData) - } - - /// Get the bytes of the encoded extrinsic. - pub fn bytes(&self) -> &[u8] { - self.0.0.0.as_slice() - } -} - -impl Decode - for UncheckedExtrinsic -{ - fn decode(input: &mut I) -> Result { - // The bytes for an UncheckedExtrinsic are first a compact - // encoded length, and then the bytes following. This is the - // same encoding as a Vec, so easiest ATM is just to decode - // into that, and then encode the vec bytes to get our extrinsic - // bytes, which we save into an `Encoded` to preserve as-is. 
- let xt_vec: Vec = Decode::decode(input)?; - Ok(UncheckedExtrinsic::new(xt_vec)) - } -} - -impl scale_encode::EncodeAsType - for UncheckedExtrinsic -{ - fn encode_as_type_to( - &self, - type_id: R::TypeId, - types: &R, - out: &mut Vec, - ) -> Result<(), scale_encode::Error> { - self.0.encode_as_type_to(type_id, types, out) - } -} - -impl From> - for UncheckedExtrinsic -{ - fn from(bytes: Vec) -> Self { - UncheckedExtrinsic::new(bytes) - } -} - -impl From> - for Vec -{ - fn from(bytes: UncheckedExtrinsic) -> Self { - bytes.0.0.0 - } -} - -pub struct UncheckedExtrinsicDecodeAsTypeVisitor( - PhantomData<(Address, Call, Signature, Extra, R)>, -); - -impl Visitor - for UncheckedExtrinsicDecodeAsTypeVisitor -{ - type Value<'scale, 'info> = UncheckedExtrinsic; - type Error = scale_decode::Error; - type TypeResolver = R; - - fn unchecked_decode_as_type<'scale, 'info>( - self, - input: &mut &'scale [u8], - type_id: R::TypeId, - types: &'info R, - ) -> DecodeAsTypeResult, Self::Error>> { - DecodeAsTypeResult::Decoded(Self::Value::decode_as_type(input, type_id, types)) - } -} - -impl IntoVisitor - for UncheckedExtrinsic -{ - type AnyVisitor = - UncheckedExtrinsicDecodeAsTypeVisitor; - - fn into_visitor() - -> UncheckedExtrinsicDecodeAsTypeVisitor { - UncheckedExtrinsicDecodeAsTypeVisitor(PhantomData) - } -} - -#[cfg(test)] -pub mod tests { - use super::*; - - use alloc::vec; - - #[test] - fn unchecked_extrinsic_encoding() { - // A tx is basically some bytes with a compact length prefix; ie an encoded vec: - let tx_bytes = vec![1u8, 2, 3].encode(); - - let unchecked_extrinsic = UncheckedExtrinsic::<(), (), (), ()>::new(tx_bytes.clone()); - let encoded_tx_bytes = unchecked_extrinsic.encode(); - - // The encoded representation must not alter the provided bytes. - assert_eq!(tx_bytes, encoded_tx_bytes); - - // However, for decoding we expect to be able to read the extrinsic from the wire - // which would be length prefixed. 
- let decoded_tx = UncheckedExtrinsic::<(), (), (), ()>::decode(&mut &tx_bytes[..]).unwrap(); - let decoded_tx_bytes = decoded_tx.bytes(); - let encoded_tx_bytes = decoded_tx.encode(); - - assert_eq!(decoded_tx_bytes, encoded_tx_bytes); - // Ensure we can decode the tx and fetch only the tx bytes. - assert_eq!(vec![1, 2, 3], encoded_tx_bytes); - } -} diff --git a/core/src/utils/wrapper_opaque.rs b/core/src/utils/wrapper_opaque.rs deleted file mode 100644 index 3cb6781fcf..0000000000 --- a/core/src/utils/wrapper_opaque.rs +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use super::PhantomDataSendSync; -use codec::{Compact, Decode, DecodeAll, Encode}; -use derive_where::derive_where; -use scale_decode::{IntoVisitor, TypeResolver, Visitor, ext::scale_type_resolver::visitor}; -use scale_encode::EncodeAsType; - -use alloc::format; -use alloc::vec::Vec; - -/// A wrapper for any type `T` which implement encode/decode in a way compatible with `Vec`. -/// [`WrapperKeepOpaque`] stores the type only in its opaque format, aka as a `Vec`. To -/// access the real type `T` [`Self::try_decode`] needs to be used. -// Dev notes: -// -// - This is adapted from [here](https://github.com/paritytech/substrate/blob/master/frame/support/src/traits/misc.rs). -// - The encoded bytes will be a compact encoded length followed by that number of bytes. -// - However, the TypeInfo describes the type as a composite with first a compact encoded length and next the type itself. -// [`Encode`] and [`Decode`] impls will "just work" to take this into a `Vec`, but we need a custom [`EncodeAsType`] -// and [`Visitor`] implementation to encode and decode based on TypeInfo. 
-#[derive(Encode, Decode)] -#[derive_where(Debug, Clone, PartialEq, Eq, Default, Hash)] -pub struct WrapperKeepOpaque { - data: Vec, - _phantom: PhantomDataSendSync, -} - -impl WrapperKeepOpaque { - /// Try to decode the wrapped type from the inner `data`. - /// - /// Returns `None` if the decoding failed. - pub fn try_decode(&self) -> Option - where - T: Decode, - { - T::decode_all(&mut &self.data[..]).ok() - } - - /// Returns the length of the encoded `T`. - pub fn encoded_len(&self) -> usize { - self.data.len() - } - - /// Returns the encoded data. - pub fn encoded(&self) -> &[u8] { - &self.data - } - - /// Create from the given encoded `data`. - pub fn from_encoded(data: Vec) -> Self { - Self { - data, - _phantom: PhantomDataSendSync::new(), - } - } - - /// Create from some raw value by encoding it. - pub fn from_value(value: T) -> Self - where - T: Encode, - { - Self { - data: value.encode(), - _phantom: PhantomDataSendSync::new(), - } - } -} - -impl EncodeAsType for WrapperKeepOpaque { - fn encode_as_type_to( - &self, - type_id: R::TypeId, - types: &R, - out: &mut Vec, - ) -> Result<(), scale_encode::Error> { - use scale_encode::error::{Error, ErrorKind, Kind}; - - let ctx = (type_id.clone(), out); - let visitor = visitor::new(ctx, |(type_id, _out), _| { - // Check that the target shape lines up: any other shape but composite is wrong. - Err(Error::new(ErrorKind::WrongShape { - actual: Kind::Struct, - expected_id: format!("{type_id:?}"), - })) - }) - .visit_composite(|(_type_id, out), _path, _fields| { - self.data.encode_to(out); - Ok(()) - }); - - types - .resolve_type(type_id.clone(), visitor) - .map_err(|_| Error::new(ErrorKind::TypeNotFound(format!("{type_id:?}"))))? 
- } -} - -pub struct WrapperKeepOpaqueVisitor(core::marker::PhantomData<(T, R)>); -impl Visitor for WrapperKeepOpaqueVisitor { - type Value<'scale, 'info> = WrapperKeepOpaque; - type Error = scale_decode::Error; - type TypeResolver = R; - - fn visit_composite<'scale, 'info>( - self, - value: &mut scale_decode::visitor::types::Composite<'scale, 'info, R>, - _type_id: R::TypeId, - ) -> Result, Self::Error> { - use scale_decode::error::{Error, ErrorKind}; - use scale_decode::visitor::DecodeError; - - if value.name() != Some("WrapperKeepOpaque") { - return Err(Error::new(ErrorKind::VisitorDecodeError( - DecodeError::TypeResolvingError(format!( - "Expected a type named 'WrapperKeepOpaque', got: {:?}", - value.name() - )), - ))); - } - - if value.remaining() != 2 { - return Err(Error::new(ErrorKind::WrongLength { - actual_len: value.remaining(), - expected_len: 2, - })); - } - - // The field to decode is a compact len followed by bytes. Decode the length, then grab the bytes. - let Compact(len) = value - .decode_item(Compact::::into_visitor()) - .expect("length checked")?; - let field = value.next().expect("length checked")?; - - // Sanity check that the compact length we decoded lines up with the number of bytes encoded in the next field. 
- if field.bytes().len() != len as usize { - return Err(Error::custom_str( - "WrapperTypeKeepOpaque compact encoded length doesn't line up with encoded byte len", - )); - } - - Ok(WrapperKeepOpaque { - data: field.bytes().to_vec(), - _phantom: PhantomDataSendSync::new(), - }) - } -} - -impl IntoVisitor for WrapperKeepOpaque { - type AnyVisitor = WrapperKeepOpaqueVisitor; - fn into_visitor() -> WrapperKeepOpaqueVisitor { - WrapperKeepOpaqueVisitor(core::marker::PhantomData) - } -} - -#[cfg(test)] -mod test { - use scale_decode::DecodeAsType; - - use alloc::vec; - - use super::*; - - // Copied from https://github.com/paritytech/substrate/blob/master/frame/support/src/traits/misc.rs - // and used for tests to check that we can work with the expected TypeInfo without needing to import - // the frame_support crate, which has quite a lot of dependencies. - impl scale_info::TypeInfo for WrapperKeepOpaque { - type Identity = Self; - fn type_info() -> scale_info::Type { - use scale_info::{Path, Type, TypeParameter, build::Fields, meta_type}; - - Type::builder() - .path(Path::new("WrapperKeepOpaque", module_path!())) - .type_params(vec![TypeParameter::new("T", Some(meta_type::()))]) - .composite( - Fields::unnamed() - .field(|f| f.compact::()) - .field(|f| f.ty::().type_name("T")), - ) - } - } - - /// Given a type definition, return type ID and registry representing it. 
- fn make_type() -> (u32, scale_info::PortableRegistry) { - let m = scale_info::MetaType::new::(); - let mut types = scale_info::Registry::new(); - let id = types.register_type(&m); - let portable_registry: scale_info::PortableRegistry = types.into(); - (id.id, portable_registry) - } - - fn roundtrips_like_scale_codec(t: T) - where - T: EncodeAsType - + DecodeAsType - + Encode - + Decode - + PartialEq - + core::fmt::Debug - + scale_info::TypeInfo - + 'static, - { - let (type_id, types) = make_type::(); - - let scale_codec_encoded = t.encode(); - let encode_as_type_encoded = t.encode_as_type(type_id, &types).unwrap(); - - assert_eq!( - scale_codec_encoded, encode_as_type_encoded, - "encoded bytes should match" - ); - - let decode_as_type_bytes = &mut &*scale_codec_encoded; - let decoded_as_type = T::decode_as_type(decode_as_type_bytes, type_id, &types) - .expect("decode-as-type decodes"); - - let decode_scale_codec_bytes = &mut &*scale_codec_encoded; - let decoded_scale_codec = T::decode(decode_scale_codec_bytes).expect("scale-codec decodes"); - - assert!( - decode_as_type_bytes.is_empty(), - "no bytes should remain in decode-as-type impl" - ); - assert!( - decode_scale_codec_bytes.is_empty(), - "no bytes should remain in codec-decode impl" - ); - - assert_eq!( - decoded_as_type, decoded_scale_codec, - "decoded values should match" - ); - } - - #[test] - fn wrapper_keep_opaque_roundtrips_ok() { - roundtrips_like_scale_codec(WrapperKeepOpaque::from_value(123u64)); - roundtrips_like_scale_codec(WrapperKeepOpaque::from_value(true)); - roundtrips_like_scale_codec(WrapperKeepOpaque::from_value(vec![1u8, 2, 3, 4])); - } -} diff --git a/core/src/view_functions/mod.rs b/core/src/view_functions/mod.rs deleted file mode 100644 index 9238331320..0000000000 --- a/core/src/view_functions/mod.rs +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! 
Encode View Function payloads, decode the associated values returned from them, and validate -//! static View Function payloads. - -pub mod payload; - -use crate::Metadata; -use crate::error::ViewFunctionError; -use alloc::string::ToString; -use alloc::vec::Vec; -use payload::Payload; -use scale_decode::IntoVisitor; - -/// Run the validation logic against some View Function payload you'd like to use. Returns `Ok(())` -/// if the payload is valid (or if it's not possible to check since the payload has no validation hash). -/// Return an error if the payload was not valid or something went wrong trying to validate it (ie -/// the View Function in question do not exist at all) -pub fn validate(payload: P, metadata: &Metadata) -> Result<(), ViewFunctionError> { - let Some(hash) = payload.validation_hash() else { - return Ok(()); - }; - - let pallet_name = payload.pallet_name(); - let function_name = payload.function_name(); - - let view_function = metadata - .pallet_by_name(pallet_name) - .ok_or_else(|| ViewFunctionError::PalletNotFound(pallet_name.to_string()))? - .view_function_by_name(function_name) - .ok_or_else(|| ViewFunctionError::ViewFunctionNotFound { - pallet_name: pallet_name.to_string(), - function_name: function_name.to_string(), - })?; - - if hash != view_function.hash() { - Err(ViewFunctionError::IncompatibleCodegen) - } else { - Ok(()) - } -} - -/// The name of the Runtime API call which can execute -pub const CALL_NAME: &str = "RuntimeViewFunction_execute_view_function"; - -/// Encode the bytes that will be passed to the "execute_view_function" Runtime API call, -/// to execute the View Function represented by the given payload. 
-pub fn call_args( - payload: P, - metadata: &Metadata, -) -> Result, ViewFunctionError> { - let inputs = frame_decode::view_functions::encode_view_function_inputs( - payload.pallet_name(), - payload.function_name(), - payload.args(), - metadata, - metadata.types(), - ) - .map_err(ViewFunctionError::CouldNotEncodeInputs)?; - - Ok(inputs) -} - -/// Decode the value bytes at the location given by the provided View Function payload. -pub fn decode_value( - bytes: &mut &[u8], - payload: P, - metadata: &Metadata, -) -> Result { - let value = frame_decode::view_functions::decode_view_function_response( - payload.pallet_name(), - payload.function_name(), - bytes, - metadata, - metadata.types(), - P::ReturnType::into_visitor(), - ) - .map_err(ViewFunctionError::CouldNotDecodeResponse)?; - - Ok(value) -} diff --git a/core/src/view_functions/payload.rs b/core/src/view_functions/payload.rs deleted file mode 100644 index dba753dca2..0000000000 --- a/core/src/view_functions/payload.rs +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! This module contains the trait and types used to represent -//! View Function calls that can be made. - -use alloc::borrow::Cow; -use alloc::string::String; -use core::marker::PhantomData; -use derive_where::derive_where; -use frame_decode::view_functions::IntoEncodableValues; -use scale_decode::DecodeAsType; - -/// This represents a View Function payload that can call into the runtime of node. -/// -/// # Components -/// -/// - associated return type -/// -/// Resulting bytes of the call are interpreted into this type. -/// -/// - query ID -/// -/// The ID used to identify in the runtime which view function to call. -/// -/// - encoded arguments -/// -/// Each argument of the View Function must be scale-encoded. -pub trait Payload { - /// Type of the arguments for this call. 
- type ArgsType: IntoEncodableValues; - /// The return type of the function call. - type ReturnType: DecodeAsType; - - /// The View Function pallet name. - fn pallet_name(&self) -> &str; - - /// The View Function function name. - fn function_name(&self) -> &str; - - /// The arguments. - fn args(&self) -> &Self::ArgsType; - - /// Returns the statically generated validation hash. - fn validation_hash(&self) -> Option<[u8; 32]> { - None - } -} - -// A reference to a payload is a valid payload. -impl Payload for &'_ P { - type ArgsType = P::ArgsType; - type ReturnType = P::ReturnType; - - fn pallet_name(&self) -> &str { - P::pallet_name(*self) - } - - fn function_name(&self) -> &str { - P::function_name(*self) - } - - fn args(&self) -> &Self::ArgsType { - P::args(*self) - } - - fn validation_hash(&self) -> Option<[u8; 32]> { - P::validation_hash(*self) - } -} - -/// A View Function payload containing the generic argument data -/// and interpreting the result of the call as `ReturnType`. -/// -/// This can be created from static values (ie those generated -/// via the `subxt` macro) or dynamic values via [`dynamic`]. -#[derive_where(Clone, Debug, Eq, Ord, PartialEq, PartialOrd; ArgsType)] -pub struct StaticPayload { - pallet_name: Cow<'static, str>, - function_name: Cow<'static, str>, - args: ArgsType, - validation_hash: Option<[u8; 32]>, - _marker: PhantomData, -} - -/// A dynamic View Function payload. -pub type DynamicPayload = StaticPayload; - -impl Payload - for StaticPayload -{ - type ArgsType = ArgsType; - type ReturnType = ReturnType; - - fn pallet_name(&self) -> &str { - &self.pallet_name - } - - fn function_name(&self) -> &str { - &self.function_name - } - - fn args(&self) -> &Self::ArgsType { - &self.args - } - - fn validation_hash(&self) -> Option<[u8; 32]> { - self.validation_hash - } -} - -impl StaticPayload { - /// Create a new [`StaticPayload`] for a View Function call. 
- pub fn new( - pallet_name: impl Into, - function_name: impl Into, - args: ArgsType, - ) -> Self { - StaticPayload { - pallet_name: pallet_name.into().into(), - function_name: function_name.into().into(), - args, - validation_hash: None, - _marker: PhantomData, - } - } - - /// Create a new static [`StaticPayload`] for a View Function call - /// using static function name and scale-encoded argument data. - /// - /// This is only expected to be used from codegen. - #[doc(hidden)] - pub fn new_static( - pallet_name: &'static str, - function_name: &'static str, - args: ArgsType, - hash: [u8; 32], - ) -> StaticPayload { - StaticPayload { - pallet_name: Cow::Borrowed(pallet_name), - function_name: Cow::Borrowed(function_name), - args, - validation_hash: Some(hash), - _marker: core::marker::PhantomData, - } - } - - /// Do not validate this call prior to submitting it. - pub fn unvalidated(self) -> Self { - Self { - validation_hash: None, - ..self - } - } -} - -/// Create a new [`DynamicPayload`] to call a View Function. -pub fn dynamic( - pallet_name: impl Into, - function_name: impl Into, - args: ArgsType, -) -> DynamicPayload { - DynamicPayload::new(pallet_name, function_name, args) -} diff --git a/historic/CHANGELOG.md b/historic/CHANGELOG.md deleted file mode 100644 index a7683f2889..0000000000 --- a/historic/CHANGELOG.md +++ /dev/null @@ -1,37 +0,0 @@ -# subxt-historic changelog - -This is separate from the Subxt changelog as subxt-historic is currently releasaed separately. - -Eventually this project will merge with Subxt and no longer exist, but until then it's being maintained and updated where needed. - -## 0.0.8 (2025-12-04) - -Expose `ClientAtBlock::resolver()`. This hands back a type resolver which is capable of resolving type IDs given by the `.visit()` methods on extrinsic fields and storage values. The extrinsics example has been modified to show how this can be used. 
- -## 0.0.7 (2025-12-03) - -Expose `OfflineClientAtBlock`, `OfflineClientAtBlockT`, `OnlinelientAtBlock`, `OnlineClientAtBlockT`. - -This is so that you can pass the `ClientAtBlock` into functions like so: - -```rust -use subxt_historic::config::Config; -use subxt_historic::client::{ ClientAtBlock, OnlineClientAtBlock, OnlineClientAtBlockT }; - -fn accepts_client_at_block_concrete(client: &ClientAtBlock, T>) { - // ... -} -fn accepts_client_at_block_generic<'conf, T: Config + 'conf, C: OnlineClientAtBlockT<'conf, T>>(client: &ClientAtBlock) { - // ... -} -``` - -## 0.0.6 (2025-12-01) - -- Add `.metadata()` on `ClientAtBlock` to expose the current metadata at some block. - -## 0.0.5 (2025-11-21) - -- Rename some fields for consistency. -- Update versions of underlying libraries being used. -- Add `.visit()` methods to extrinsic fields and storage values, and examples of using this to our examples. diff --git a/historic/Cargo.toml b/historic/Cargo.toml deleted file mode 100644 index 676b9d0f38..0000000000 --- a/historic/Cargo.toml +++ /dev/null @@ -1,63 +0,0 @@ -[package] -name = "subxt-historic" -version = "0.0.8" -authors.workspace = true -edition.workspace = true -rust-version.workspace = true -publish = true - -license.workspace = true -readme = "README.md" -repository.workspace = true -documentation.workspace = true -homepage.workspace = true -description = "Download non head-of-chain blocks and state from Substrate based nodes" -keywords = ["parity", "substrate", "blockchain"] - -[lints] -workspace = true - -[features] -default = ["jsonrpsee", "native"] - -# Enable this for native (ie non web/wasm builds). -# Exactly 1 of "web" and "native" is expected. -native = [ - "subxt-rpcs/native", -] - -# Enable this for web/wasm builds. -# Exactly 1 of "web" and "native" is expected. 
-web = [ - "subxt-rpcs/web", -] - -# Enable this to use the reconnecting rpc client -reconnecting-rpc-client = ["subxt-rpcs/reconnecting-rpc-client"] - -# Enable this to use jsonrpsee, which enables the jsonrpsee RPC client, and -# a couple of util functions which rely on jsonrpsee. -jsonrpsee = [ - "subxt-rpcs/jsonrpsee", -] - -[dependencies] -subxt-rpcs = { workspace = true } -frame-decode = { workspace = true, features = ["legacy", "legacy-types"] } -frame-metadata = { workspace = true, features = ["std", "legacy"] } -scale-type-resolver = { workspace = true, features = ["scale-info"] } -codec = { workspace = true } -primitive-types = { workspace = true } -scale-info = { workspace = true } -scale-info-legacy = { workspace = true } -scale-decode = { workspace = true } -thiserror = { workspace = true } -sp-crypto-hashing = { workspace = true } -url = { workspace = true } -futures = { workspace = true } - -[dev-dependencies] -tokio = { workspace = true, features = ["full"] } -scale-value = { workspace = true } -scale-decode = { workspace = true, features = ["derive"] } -hex = { workspace = true } diff --git a/historic/README.md b/historic/README.md deleted file mode 100644 index 43c2a9611b..0000000000 --- a/historic/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# subxt-historic - -**This crate is a work in progress and currently is released only as a preview.** - -While `subxt` is a library for working at the head of a chain (submitting transactions and obtaining the current state), `subxt-historic` is a library for decoding blocks and state that are anywhere in a chain. 
To broadly summarize the differences: - -| Feature | subxt | subxt-historic | -|-----------------------------------------|------------------------------|-------------------------------| -| Block access | Head of chain | Any block in chain | -| Connection to chain | Light client or RPC node | Archive RPC nodes only | -| Transaction submission | Yes | No | -| Metadata compatibility | V14 and newer | Any version | - -# Examples - -See the [examples](https://github.com/paritytech/subxt/tree/master/historic/examples) folder for examples of how to use `subxt-historic`. diff --git a/historic/examples/extrinsics.rs b/historic/examples/extrinsics.rs deleted file mode 100644 index b3f13905c3..0000000000 --- a/historic/examples/extrinsics.rs +++ /dev/null @@ -1,495 +0,0 @@ -#![allow(missing_docs)] -use subxt_historic::{OnlineClient, PolkadotConfig}; - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Configuration for the Polkadot relay chain. - let config = PolkadotConfig::new(); - - // Create an online client for the Polkadot relay chain, pointed at a Polkadot archive node. - let client = OnlineClient::from_url(config, "wss://rpc.polkadot.io").await?; - - // Iterate through some randomly selected old blocks to show how to fetch and decode extrinsics. - for block_number in 1234567.. { - println!("=== Block {block_number} ==="); - - // Point the client at a specific block number. By default this will download and cache - // metadata for the required spec version (so it's cheaper to instantiate again), if it - // hasn't already, and borrow the relevant legacy types from the client. - let client_at_block = client.at(block_number).await?; - - // Fetch the extrinsics at that block. - let extrinsics = client_at_block.extrinsics().fetch().await?; - - // Now, we have various operations to work with them. Here we print out various details - // about each extrinsic. 
- for extrinsic in extrinsics.iter() { - println!( - "{}.{}", - extrinsic.call().pallet_name(), - extrinsic.call().name() - ); - - if let Some(signature) = extrinsic.signature_bytes() { - println!(" Signature: 0x{}", hex::encode(signature)); - } - - println!(" Call Data:"); - - // We can decode each of the fields (in this example we decode everything into a - // scale_value::Value type, which can represent any SCALE encoded data, but if you - // have an idea of the type then you can try to decode into that type instead): - for field in extrinsic.call().fields().iter() { - // We can visit fields, which gives us the ability to inspect and decode information - // from them selectively, returning whatever we like from it. Here we demo our - // type name visitor which is defined below: - let tn = field - .visit(type_name::GetTypeName::new())? - .unwrap_or_default(); - - // When visiting fields we can also decode into a custom shape like so: - let _custom_value = - field.visit(value::GetValue::new(&client_at_block.resolver()))?; - - // We can also obtain and decode things without the complexity of the above: - println!( - " {}: {} {}", - field.name(), - field.decode_as::().unwrap(), - if tn.is_empty() { - String::new() - } else { - format!("(type name: {tn})") - }, - ); - } - - // Or, all of them at once: - println!( - " All: {}", - extrinsic - .call() - .fields() - .decode_as::>() - .unwrap() - ); - - // We can also look at things like the transaction extensions: - if let Some(extensions) = extrinsic.transaction_extensions() { - println!(" Transaction Extensions:"); - - // We can decode each of them: - for extension in extensions.iter() { - println!( - " {}: {}", - extension.name(), - extension.decode_as::().unwrap() - ); - } - - // Or all of them at once: - println!( - " All: {}", - extensions.decode_as::>().unwrap() - ); - } - } - } - - Ok(()) -} - -/// This module defines an example visitor which retrieves the name of a type. 
-/// This is a more advanced use case and can typically be avoided. -mod type_name { - use scale_decode::{ - Visitor, - visitor::types::{Composite, Sequence, Variant}, - visitor::{TypeIdFor, Unexpected}, - }; - use scale_type_resolver::TypeResolver; - - /// This is a visitor which obtains type names. - pub struct GetTypeName { - marker: core::marker::PhantomData, - } - - impl GetTypeName { - /// Construct our TypeName visitor. - pub fn new() -> Self { - GetTypeName { - marker: core::marker::PhantomData, - } - } - } - - impl Visitor for GetTypeName { - type Value<'scale, 'resolver> = Option<&'resolver str>; - type Error = scale_decode::Error; - type TypeResolver = R; - - // Look at the path of types that have paths and return the ident from that. - fn visit_composite<'scale, 'resolver>( - self, - value: &mut Composite<'scale, 'resolver, Self::TypeResolver>, - _type_id: TypeIdFor, - ) -> Result, Self::Error> { - Ok(value.path().last()) - } - fn visit_variant<'scale, 'resolver>( - self, - value: &mut Variant<'scale, 'resolver, Self::TypeResolver>, - _type_id: TypeIdFor, - ) -> Result, Self::Error> { - Ok(value.path().last()) - } - fn visit_sequence<'scale, 'resolver>( - self, - value: &mut Sequence<'scale, 'resolver, Self::TypeResolver>, - _type_id: TypeIdFor, - ) -> Result, Self::Error> { - Ok(value.path().last()) - } - - // Else, we return nothing as we can't find a name for the type. - fn visit_unexpected<'scale, 'resolver>( - self, - _unexpected: Unexpected, - ) -> Result, Self::Error> { - Ok(None) - } - } -} - -/// This visitor demonstrates how to decode and return a custom Value shape -mod value { - use scale_decode::{ - Visitor, - visitor::TypeIdFor, - visitor::types::{Array, BitSequence, Composite, Sequence, Str, Tuple, Variant}, - }; - use scale_type_resolver::TypeResolver; - use std::collections::HashMap; - - /// A value type we're decoding into. 
- #[allow(dead_code)] - pub enum Value { - Number(f64), - BigNumber(String), - Bool(bool), - Char(char), - Array(Vec), - String(String), - Address(Vec), - I256([u8; 32]), - U256([u8; 32]), - Struct(HashMap), - VariantWithoutData(String), - VariantWithData(String, VariantFields), - } - - pub enum VariantFields { - Unnamed(Vec), - Named(HashMap), - } - - /// An error we can encounter trying to decode things into a [`Value`] - #[derive(Debug, thiserror::Error)] - pub enum ValueError { - #[error("Decode error: {0}")] - Decode(#[from] scale_decode::visitor::DecodeError), - #[error("Cannot decode bit sequence: {0}")] - CannotDecodeBitSequence(codec::Error), - #[error("Cannot resolve variant type information: {0}")] - CannotResolveVariantType(String), - } - - /// This is a visitor which obtains type names. - pub struct GetValue<'r, R> { - resolver: &'r R, - } - - impl<'r, R> GetValue<'r, R> { - /// Construct our TypeName visitor. - pub fn new(resolver: &'r R) -> Self { - GetValue { resolver } - } - } - - impl<'r, R: TypeResolver> Visitor for GetValue<'r, R> { - type Value<'scale, 'resolver> = Value; - type Error = ValueError; - type TypeResolver = R; - - fn visit_i256<'resolver>( - self, - value: &[u8; 32], - _type_id: TypeIdFor, - ) -> Result, Self::Error> { - Ok(Value::I256(*value)) - } - - fn visit_u256<'resolver>( - self, - value: &[u8; 32], - _type_id: TypeIdFor, - ) -> Result, Self::Error> { - Ok(Value::U256(*value)) - } - - fn visit_i128<'scale, 'resolver>( - self, - value: i128, - _type_id: TypeIdFor, - ) -> Result, Self::Error> { - let attempt = value as f64; - if attempt as i128 == value { - Ok(Value::Number(attempt)) - } else { - Ok(Value::BigNumber(value.to_string())) - } - } - - fn visit_i64<'scale, 'resolver>( - self, - value: i64, - type_id: TypeIdFor, - ) -> Result, Self::Error> { - self.visit_i128(value.into(), type_id) - } - - fn visit_i32<'scale, 'resolver>( - self, - value: i32, - type_id: TypeIdFor, - ) -> Result, Self::Error> { - 
self.visit_i128(value.into(), type_id) - } - - fn visit_i16<'scale, 'resolver>( - self, - value: i16, - type_id: TypeIdFor, - ) -> Result, Self::Error> { - self.visit_i128(value.into(), type_id) - } - - fn visit_i8<'scale, 'resolver>( - self, - value: i8, - type_id: TypeIdFor, - ) -> Result, Self::Error> { - self.visit_i128(value.into(), type_id) - } - - fn visit_u128<'scale, 'resolver>( - self, - value: u128, - _type_id: TypeIdFor, - ) -> Result, Self::Error> { - let attempt = value as f64; - if attempt as u128 == value { - Ok(Value::Number(attempt)) - } else { - Ok(Value::BigNumber(value.to_string())) - } - } - - fn visit_u64<'scale, 'resolver>( - self, - value: u64, - type_id: TypeIdFor, - ) -> Result, Self::Error> { - self.visit_u128(value.into(), type_id) - } - - fn visit_u32<'scale, 'resolver>( - self, - value: u32, - type_id: TypeIdFor, - ) -> Result, Self::Error> { - self.visit_u128(value.into(), type_id) - } - - fn visit_u16<'scale, 'resolver>( - self, - value: u16, - type_id: TypeIdFor, - ) -> Result, Self::Error> { - self.visit_u128(value.into(), type_id) - } - - fn visit_u8<'scale, 'resolver>( - self, - value: u8, - type_id: TypeIdFor, - ) -> Result, Self::Error> { - self.visit_u128(value.into(), type_id) - } - - fn visit_bool<'scale, 'resolver>( - self, - value: bool, - _type_id: TypeIdFor, - ) -> Result, Self::Error> { - Ok(Value::Bool(value)) - } - - fn visit_char<'scale, 'resolver>( - self, - value: char, - _type_id: TypeIdFor, - ) -> Result, Self::Error> { - Ok(Value::Char(value)) - } - - fn visit_array<'scale, 'resolver>( - self, - values: &mut Array<'scale, 'resolver, Self::TypeResolver>, - _type_id: TypeIdFor, - ) -> Result, Self::Error> { - Ok(Value::Array(to_array( - self.resolver, - values.remaining(), - values, - )?)) - } - - fn visit_sequence<'scale, 'resolver>( - self, - values: &mut Sequence<'scale, 'resolver, Self::TypeResolver>, - _type_id: TypeIdFor, - ) -> Result, Self::Error> { - Ok(Value::Array(to_array( - self.resolver, - 
values.remaining(), - values, - )?)) - } - - fn visit_str<'scale, 'resolver>( - self, - value: &mut Str<'scale>, - _type_id: TypeIdFor, - ) -> Result, Self::Error> { - Ok(Value::String(value.as_str()?.to_owned())) - } - - fn visit_tuple<'scale, 'resolver>( - self, - values: &mut Tuple<'scale, 'resolver, Self::TypeResolver>, - _type_id: TypeIdFor, - ) -> Result, Self::Error> { - Ok(Value::Array(to_array( - self.resolver, - values.remaining(), - values, - )?)) - } - - fn visit_bitsequence<'scale, 'resolver>( - self, - value: &mut BitSequence<'scale>, - _type_id: TypeIdFor, - ) -> Result, Self::Error> { - let bits = value.decode()?; - let mut out = Vec::with_capacity(bits.len()); - for b in bits { - let b = b.map_err(ValueError::CannotDecodeBitSequence)?; - out.push(Value::Bool(b)); - } - Ok(Value::Array(out)) - } - - fn visit_composite<'scale, 'resolver>( - self, - value: &mut Composite<'scale, 'resolver, Self::TypeResolver>, - _type_id: TypeIdFor, - ) -> Result, Self::Error> { - // Special case for ss58 addresses: - if let Some(n) = value.name() - && n == "AccountId32" - && value.bytes_from_start().len() == 32 - { - return Ok(Value::Address(value.bytes_from_start().to_vec())); - } - - // Reuse logic for decoding variant fields: - match to_variant_fieldish(self.resolver, value)? { - VariantFields::Named(s) => Ok(Value::Struct(s)), - VariantFields::Unnamed(a) => Ok(Value::Array(a)), - } - } - - fn visit_variant<'scale, 'resolver>( - self, - value: &mut Variant<'scale, 'resolver, Self::TypeResolver>, - type_id: TypeIdFor, - ) -> Result, Self::Error> { - // Because we have access to a type resolver on self, we can - // look up the type IDs we're given back and base decode decisions - // on them. 
here we see whether the enum type has any data attached: - let has_data_visitor = scale_type_resolver::visitor::new((), |_, _| false) - .visit_variant(|_, _, variants| { - for mut variant in variants { - if variant.fields.next().is_some() { - return true; - } - } - false - }); - - // Do any variants have data in this enum type? - let has_data = self - .resolver - .resolve_type(type_id, has_data_visitor) - .map_err(|e| ValueError::CannotResolveVariantType(e.to_string()))?; - - let name = value.name().to_owned(); - - // base our decoding on whether any data in enum type. - if has_data { - let fields = to_variant_fieldish(self.resolver, value.fields())?; - Ok(Value::VariantWithData(name, fields)) - } else { - Ok(Value::VariantWithoutData(name)) - } - } - } - - fn to_variant_fieldish<'r, 'scale, 'resolver, R: TypeResolver>( - resolver: &'r R, - value: &mut Composite<'scale, 'resolver, R>, - ) -> Result { - // If fields are unnamed, treat as array: - if value.fields().iter().all(|f| f.name.is_none()) { - return Ok(VariantFields::Unnamed(to_array( - resolver, - value.remaining(), - value, - )?)); - } - - // Otherwise object: - let mut out = HashMap::new(); - for field in value { - let field = field?; - let name = field.name().unwrap().to_string(); - let value = field.decode_with_visitor(GetValue::new(resolver))?; - out.insert(name, value); - } - Ok(VariantFields::Named(out)) - } - - fn to_array<'r, 'scale, 'resolver, R: TypeResolver>( - resolver: &'r R, - len: usize, - mut values: impl scale_decode::visitor::DecodeItemIterator<'scale, 'resolver, R>, - ) -> Result, ValueError> { - let mut out = Vec::with_capacity(len); - while let Some(value) = values.decode_item(GetValue::new(resolver)) { - out.push(value?); - } - Ok(out) - } -} diff --git a/historic/examples/storage.rs b/historic/examples/storage.rs deleted file mode 100644 index f61ed9ac02..0000000000 --- a/historic/examples/storage.rs +++ /dev/null @@ -1,175 +0,0 @@ -#![allow(missing_docs)] -use 
subxt_historic::{OnlineClient, PolkadotConfig, ext::StreamExt}; - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Configuration for the Polkadot relay chain. - let config = PolkadotConfig::new(); - - // Create an online client for the Polkadot relay chain, pointed at a Polkadot archive node. - let client = OnlineClient::from_url(config, "wss://rpc.polkadot.io").await?; - - // Iterate through some randomly selected blocks to show how to fetch and decode storage. - for block_number in 12345678.. { - println!("=== Block {block_number} ==="); - - // Point the client at a specific block number. By default this will download and cache - // metadata for the required spec version (so it's cheaper to instantiate again), if it - // hasn't already, and borrow the relevant legacy types from the client. - let client_at_block = client.at(block_number).await?; - - // We'll work the account balances at the given block, for this example. - let account_balances = client_at_block.storage().entry("System", "Account")?; - - // We can see the default value for this entry at this block, if one exists. - if let Some(default_value) = account_balances.default_value() { - let default_balance_info = default_value.decode_as::()?; - println!(" Default balance info: {default_balance_info}"); - } - - // We can fetch a specific account balance by its key, like so (here I just picked a random key - // I knew to exist from iterating over storage entries): - let account_id_hex = "9a4d0faa2ba8c3cc5711852960940793acf55bf195b6eecf88fa78e961d0ce4a"; - let account_id: [u8; 32] = hex::decode(account_id_hex).unwrap().try_into().unwrap(); - if let Some(entry) = account_balances.fetch((account_id,)).await? 
{ - // We can decode the value into our generic `scale_value::Value` type, which can - // represent any SCALE-encoded value, like so: - let _balance_info = entry.decode_as::()?; - - // We can visit the value, which is a more advanced use case and allows us to extract more - // data from the type, here the name of it, if it exists: - let tn = entry - .visit(type_name::GetTypeName::new())? - .unwrap_or(""); - - // Or, if we know what shape to expect, we can decode the parts of the value that we care - // about directly into a static type, which is more efficient and allows easy type-safe - // access, like so: - #[derive(scale_decode::DecodeAsType)] - struct BalanceInfo { - data: BalanceInfoData, - } - #[derive(scale_decode::DecodeAsType)] - struct BalanceInfoData { - free: u128, - reserved: u128, - misc_frozen: u128, - fee_frozen: u128, - } - let balance_info = entry.decode_as::()?; - - println!( - " Single balance info from {account_id_hex} => free: {} reserved: {} misc_frozen: {} fee_frozen: {} (type name: {tn})", - balance_info.data.free, - balance_info.data.reserved, - balance_info.data.misc_frozen, - balance_info.data.fee_frozen, - ); - } - - // Or we can iterate over all of the account balances and print them out, like so. Here we provide an - // empty tuple, indicating that we want to iterate over everything and not only things under a certain key - // (in the case of account balances, there is only one key anyway, but other storage entries may map from - // several keys to a value, and for those we can choose which depth we iterate at by providing as many keys - // as we want and leaving the rest). Here I only take the first 10 accounts I find for the sake of the example. 
- let mut all_balances = account_balances.iter(()).await?.take(10); - while let Some(entry) = all_balances.next().await { - let entry = entry?; - let key = entry.key()?; - - // Decode the account ID from the key (we know here that we're working - // with a map which has one value, an account ID, so we just decode that part: - let account_id = key - .part(0) - .unwrap() - .decode_as::<[u8; 32]>()? - .expect("We expect this key to decode into a 32 byte AccountId"); - - let account_id_hex = hex::encode(account_id); - - // Decode these values into our generic scale_value::Value type. Less efficient than - // defining a static type as above, but easier for the sake of the example. - let balance_info = entry.value().decode_as::()?; - println!(" {account_id_hex} => {balance_info}"); - } - - // We can also chain things together to fetch and decode a value in one go. - let _val = client_at_block - .storage() - .entry("System", "Account")? - .fetch((account_id,)) - .await? - .unwrap() - .decode_as::()?; - - let _vals = client_at_block - .storage() - .entry("System", "Account")? - .iter(()) - .await?; - } - - Ok(()) -} - -/// This module defines an example visitor which retrieves the name of a type. -/// This is a more advanced use case and can typically be avoided. -mod type_name { - use scale_decode::{ - Visitor, - visitor::types::{Composite, Sequence, Variant}, - visitor::{TypeIdFor, Unexpected}, - }; - use scale_type_resolver::TypeResolver; - - /// This is a visitor which obtains type names. - pub struct GetTypeName { - marker: core::marker::PhantomData, - } - - impl GetTypeName { - /// Construct our TypeName visitor. - pub fn new() -> Self { - GetTypeName { - marker: core::marker::PhantomData, - } - } - } - - impl Visitor for GetTypeName { - type Value<'scale, 'resolver> = Option<&'resolver str>; - type Error = scale_decode::Error; - type TypeResolver = R; - - // Look at the path of types that have paths and return the ident from that. 
- fn visit_composite<'scale, 'resolver>( - self, - value: &mut Composite<'scale, 'resolver, Self::TypeResolver>, - _type_id: TypeIdFor, - ) -> Result, Self::Error> { - Ok(value.path().last()) - } - fn visit_variant<'scale, 'resolver>( - self, - value: &mut Variant<'scale, 'resolver, Self::TypeResolver>, - _type_id: TypeIdFor, - ) -> Result, Self::Error> { - Ok(value.path().last()) - } - fn visit_sequence<'scale, 'resolver>( - self, - value: &mut Sequence<'scale, 'resolver, Self::TypeResolver>, - _type_id: TypeIdFor, - ) -> Result, Self::Error> { - Ok(value.path().last()) - } - - // Else, we return nothing as we can't find a name for the type. - fn visit_unexpected<'scale, 'resolver>( - self, - _unexpected: Unexpected, - ) -> Result, Self::Error> { - Ok(None) - } - } -} diff --git a/historic/src/client.rs b/historic/src/client.rs deleted file mode 100644 index 421e36a28c..0000000000 --- a/historic/src/client.rs +++ /dev/null @@ -1,73 +0,0 @@ -mod offline_client; -mod online_client; - -use crate::config::Config; -use crate::extrinsics::ExtrinsicsClient; -use crate::storage::StorageClient; -use crate::utils::AnyResolver; -use frame_metadata::RuntimeMetadata; -use std::marker::PhantomData; - -pub use offline_client::{OfflineClient, OfflineClientAtBlock, OfflineClientAtBlockT}; -pub use online_client::{OnlineClient, OnlineClientAtBlock, OnlineClientAtBlockT}; - -/// This represents a client at a specific block number. -pub struct ClientAtBlock { - client: Client, - marker: PhantomData, -} - -impl ClientAtBlock { - /// Construct a new client at some block. - pub(crate) fn new(client: Client) -> Self { - Self { - client, - marker: PhantomData, - } - } -} - -impl<'client, T, Client> ClientAtBlock -where - T: Config + 'client, - Client: OfflineClientAtBlockT<'client, T>, -{ - /// Work with extrinsics. - pub fn extrinsics(&'_ self) -> ExtrinsicsClient<'_, Client, T> { - ExtrinsicsClient::new(&self.client) - } - - /// Work with storage. 
- pub fn storage(&'_ self) -> StorageClient<'_, Client, T> { - StorageClient::new(&self.client) - } - - /// Return the metadata in use at this block. - pub fn metadata(&self) -> &RuntimeMetadata { - self.client.metadata() - } - - /// Return something which implements [`scale_type_resolver::TypeResolver`] and - /// can be used in conjnction with type IDs in `.visit` methods. - pub fn resolver(&self) -> AnyResolver<'_, 'client> { - match self.client.metadata() { - RuntimeMetadata::V0(_) - | RuntimeMetadata::V1(_) - | RuntimeMetadata::V2(_) - | RuntimeMetadata::V3(_) - | RuntimeMetadata::V4(_) - | RuntimeMetadata::V5(_) - | RuntimeMetadata::V6(_) - | RuntimeMetadata::V7(_) - | RuntimeMetadata::V8(_) - | RuntimeMetadata::V9(_) - | RuntimeMetadata::V10(_) - | RuntimeMetadata::V11(_) - | RuntimeMetadata::V12(_) - | RuntimeMetadata::V13(_) => AnyResolver::B(self.client.legacy_types()), - RuntimeMetadata::V14(m) => AnyResolver::A(&m.types), - RuntimeMetadata::V15(m) => AnyResolver::A(&m.types), - RuntimeMetadata::V16(m) => AnyResolver::A(&m.types), - } - } -} diff --git a/historic/src/client/offline_client.rs b/historic/src/client/offline_client.rs deleted file mode 100644 index 209489b449..0000000000 --- a/historic/src/client/offline_client.rs +++ /dev/null @@ -1,83 +0,0 @@ -use super::ClientAtBlock; -use crate::config::Config; -use crate::error::OfflineClientAtBlockError; -use frame_metadata::RuntimeMetadata; -use scale_info_legacy::TypeRegistrySet; -use std::sync::Arc; - -/// A client which exposes the means to decode historic data on a chain offline. -#[derive(Clone, Debug)] -pub struct OfflineClient { - /// The configuration for this client. - config: Arc, -} - -impl OfflineClient { - /// Create a new [`OfflineClient`] with the given configuration. - pub fn new(config: T) -> Self { - OfflineClient { - config: Arc::new(config), - } - } - - /// Pick the block height at which to operate. 
This references data from the - /// [`OfflineClient`] it's called on, and so cannot outlive it. - pub fn at<'this>( - &'this self, - block_number: u64, - ) -> Result, T>, OfflineClientAtBlockError> { - let config = &self.config; - let spec_version = self - .config - .spec_version_for_block_number(block_number) - .ok_or(OfflineClientAtBlockError::SpecVersionNotFound { block_number })?; - - let legacy_types = self.config.legacy_types_for_spec_version(spec_version); - let metadata = self - .config - .metadata_for_spec_version(spec_version) - .ok_or(OfflineClientAtBlockError::MetadataNotFound { spec_version })?; - - Ok(ClientAtBlock::new(OfflineClientAtBlock { - config, - legacy_types, - metadata, - })) - } -} - -/// This represents an offline-only client at a specific block. -pub trait OfflineClientAtBlockT<'client, T: Config + 'client> { - /// Get the configuration for this client. - fn config(&self) -> &'client T; - /// Get the legacy types that work at this block. - fn legacy_types(&'_ self) -> &TypeRegistrySet<'client>; - /// Get the metadata appropriate for this block. - fn metadata(&self) -> &RuntimeMetadata; -} - -// Dev note: this shouldn't need to be exposed unless there is some -// need to explicitly name the ClientAAtBlock type. Rather keep it -// private to allow changes if possible. -pub struct OfflineClientAtBlock<'client, T: Config + 'client> { - /// The configuration for this chain. - config: &'client T, - /// Historic types to use at this block number. - legacy_types: TypeRegistrySet<'client>, - /// Metadata to use at this block number. 
- metadata: Arc, -} - -impl<'client, T: Config + 'client> OfflineClientAtBlockT<'client, T> - for OfflineClientAtBlock<'client, T> -{ - fn config(&self) -> &'client T { - self.config - } - fn legacy_types(&self) -> &TypeRegistrySet<'client> { - &self.legacy_types - } - fn metadata(&self) -> &RuntimeMetadata { - &self.metadata - } -} diff --git a/historic/src/client/online_client.rs b/historic/src/client/online_client.rs deleted file mode 100644 index 3aab638d2a..0000000000 --- a/historic/src/client/online_client.rs +++ /dev/null @@ -1,331 +0,0 @@ -use super::ClientAtBlock; -use crate::client::OfflineClientAtBlockT; -use crate::config::Config; -use crate::error::OnlineClientAtBlockError; -use codec::{Compact, Decode, Encode}; -use frame_metadata::{RuntimeMetadata, RuntimeMetadataPrefixed}; -use scale_info_legacy::TypeRegistrySet; -use std::sync::Arc; -use subxt_rpcs::methods::chain_head::ArchiveCallResult; -use subxt_rpcs::{ChainHeadRpcMethods, RpcClient}; - -#[cfg(feature = "jsonrpsee")] -#[cfg_attr(docsrs, doc(cfg(feature = "jsonrpsee")))] -use crate::error::OnlineClientError; - -/// A client which exposes the means to decode historic data on a chain online. -#[derive(Clone, Debug)] -pub struct OnlineClient { - inner: Arc>, -} - -#[derive(Debug)] -struct OnlineClientInner { - /// The configuration for this client. - config: T, - /// The RPC methods used to communicate with the node. - rpc_methods: ChainHeadRpcMethods, -} - -// The default constructors assume Jsonrpsee. -#[cfg(feature = "jsonrpsee")] -#[cfg_attr(docsrs, doc(cfg(feature = "jsonrpsee")))] -impl OnlineClient { - /// Construct a new [`OnlineClient`] using default settings which - /// point to a locally running node on `ws://127.0.0.1:9944`. - /// - /// **Note:** This will only work if the local node is an archive node. 
- pub async fn new(config: T) -> Result, OnlineClientError> { - let url = "ws://127.0.0.1:9944"; - OnlineClient::from_url(config, url).await - } - - /// Construct a new [`OnlineClient`], providing a URL to connect to. - pub async fn from_url( - config: T, - url: impl AsRef, - ) -> Result, OnlineClientError> { - let url_str = url.as_ref(); - let url = url::Url::parse(url_str).map_err(|_| OnlineClientError::InvalidUrl { - url: url_str.to_string(), - })?; - if !Self::is_url_secure(&url) { - return Err(OnlineClientError::RpcClientError( - subxt_rpcs::Error::InsecureUrl(url_str.to_string()), - )); - } - OnlineClient::from_insecure_url(config, url).await - } - - /// Construct a new [`OnlineClient`], providing a URL to connect to. - /// - /// Allows insecure URLs without SSL encryption, e.g. (http:// and ws:// URLs). - pub async fn from_insecure_url( - config: T, - url: impl AsRef, - ) -> Result, OnlineClientError> { - let rpc_client = RpcClient::from_insecure_url(url).await?; - Ok(OnlineClient::from_rpc_client(config, rpc_client)) - } - - fn is_url_secure(url: &url::Url) -> bool { - let secure_scheme = url.scheme() == "https" || url.scheme() == "wss"; - let is_localhost = url.host().is_some_and(|e| match e { - url::Host::Domain(e) => e == "localhost", - url::Host::Ipv4(e) => e.is_loopback(), - url::Host::Ipv6(e) => e.is_loopback(), - }); - secure_scheme || is_localhost - } -} - -impl OnlineClient { - /// Construct a new [`OnlineClient`] by providing an [`RpcClient`] to drive the connection, - /// and some configuration for the chain we're connecting to. - pub fn from_rpc_client(config: T, rpc_client: impl Into) -> OnlineClient { - let rpc_client = rpc_client.into(); - let rpc_methods = ChainHeadRpcMethods::new(rpc_client); - OnlineClient { - inner: Arc::new(OnlineClientInner { - config, - rpc_methods, - }), - } - } - - /// Pick the block height at which to operate. This references data from the - /// [`OnlineClient`] it's called on, and so cannot outlive it. 
- pub async fn at( - &'_ self, - block_number: u64, - ) -> Result, T>, OnlineClientAtBlockError> { - let config = &self.inner.config; - let rpc_methods = &self.inner.rpc_methods; - - let block_hash = rpc_methods - .archive_v1_hash_by_height(block_number as usize) - .await - .map_err(|e| OnlineClientAtBlockError::CannotGetBlockHash { - block_number, - reason: e, - })? - .pop() - .ok_or_else(|| OnlineClientAtBlockError::BlockNotFound { block_number })? - .into(); - - // Get our configuration, or fetch from the node if not available. - let spec_version = - if let Some(spec_version) = config.spec_version_for_block_number(block_number) { - spec_version - } else { - // Fetch spec version. Caching this doesn't really make sense, so either - // details are provided offline or we fetch them every time. - get_spec_version(rpc_methods, block_hash).await? - }; - let metadata = if let Some(metadata) = config.metadata_for_spec_version(spec_version) { - metadata - } else { - // Fetch and then give our config the opportunity to cache this metadata. - let metadata = get_metadata(rpc_methods, block_hash).await?; - let metadata = Arc::new(metadata); - config.set_metadata_for_spec_version(spec_version, metadata.clone()); - metadata - }; - - let mut historic_types = config.legacy_types_for_spec_version(spec_version); - // The metadata can be used to construct call and event types instead of us having to hardcode them all for every spec version: - let types_from_metadata = frame_decode::helpers::type_registry_from_metadata_any(&metadata) - .map_err( - |parse_error| OnlineClientAtBlockError::CannotInjectMetadataTypes { parse_error }, - )?; - historic_types.prepend(types_from_metadata); - - Ok(ClientAtBlock::new(OnlineClientAtBlock { - config, - historic_types, - metadata, - rpc_methods, - block_hash, - })) - } -} - -/// This represents an online client at a specific block. 
-pub trait OnlineClientAtBlockT<'client, T: Config + 'client>: - OfflineClientAtBlockT<'client, T> -{ - /// Return the RPC methods we'll use to interact with the node. - fn rpc_methods(&self) -> &ChainHeadRpcMethods; - /// Return the block hash for the current block. - fn block_hash(&self) -> ::Hash; -} - -// Dev note: this shouldn't need to be exposed unless there is some -// need to explicitly name the ClientAAtBlock type. Rather keep it -// private to allow changes if possible. -pub struct OnlineClientAtBlock<'client, T: Config + 'client> { - /// The configuration for this chain. - config: &'client T, - /// Historic types to use at this block number. - historic_types: TypeRegistrySet<'client>, - /// Metadata to use at this block number. - metadata: Arc, - /// We also need RPC methods for online interactions. - rpc_methods: &'client ChainHeadRpcMethods, - /// The block hash at which this client is operating. - block_hash: ::Hash, -} - -impl<'client, T: Config + 'client> OnlineClientAtBlockT<'client, T> - for OnlineClientAtBlock<'client, T> -{ - fn rpc_methods(&self) -> &ChainHeadRpcMethods { - self.rpc_methods - } - fn block_hash(&self) -> ::Hash { - self.block_hash - } -} - -impl<'client, T: Config + 'client> OfflineClientAtBlockT<'client, T> - for OnlineClientAtBlock<'client, T> -{ - fn config(&self) -> &'client T { - self.config - } - fn legacy_types(&'_ self) -> &TypeRegistrySet<'client> { - &self.historic_types - } - fn metadata(&self) -> &RuntimeMetadata { - &self.metadata - } -} - -async fn get_spec_version( - rpc_methods: &ChainHeadRpcMethods, - block_hash: ::Hash, -) -> Result { - use codec::Decode; - use subxt_rpcs::methods::chain_head::ArchiveCallResult; - - // make a runtime call to get the version information. This is also a constant - // in the metadata and so we could fetch it from there to avoid the call, but it would be a - // bit more effort. 
- let spec_version_bytes = { - let call_res = rpc_methods - .archive_v1_call(block_hash.into(), "Core_version", &[]) - .await - .map_err(|e| OnlineClientAtBlockError::CannotGetSpecVersion { - block_hash: block_hash.to_string(), - reason: format!("Error calling Core_version: {e}"), - })?; - match call_res { - ArchiveCallResult::Success(bytes) => bytes.0, - ArchiveCallResult::Error(e) => { - return Err(OnlineClientAtBlockError::CannotGetSpecVersion { - block_hash: block_hash.to_string(), - reason: format!("Core_version returned an error: {e}"), - }); - } - } - }; - - // We only care about the spec version, so just decode enough of this version information - // to be able to pluck out what we want, and ignore the rest. - let spec_version = { - #[derive(codec::Decode)] - struct SpecVersionHeader { - _spec_name: String, - _impl_name: String, - _authoring_version: u32, - spec_version: u32, - } - SpecVersionHeader::decode(&mut &spec_version_bytes[..]) - .map_err(|e| OnlineClientAtBlockError::CannotGetSpecVersion { - block_hash: block_hash.to_string(), - reason: format!("Error decoding Core_version response: {e}"), - })? - .spec_version - }; - - Ok(spec_version) -} - -async fn get_metadata( - rpc_methods: &ChainHeadRpcMethods, - block_hash: ::Hash, -) -> Result { - // First, try to use the "modern" metadata APIs to get the most recent version we can. - let version_to_get = rpc_methods - .archive_v1_call(block_hash.into(), "Metadata_metadata_versions", &[]) - .await - .ok() - .and_then(|res| res.as_success()) - .and_then(|res| >::decode(&mut &res[..]).ok()) - .and_then(|versions| { - // We want to filter out the "unstable" version, which is represented by u32::MAX. - versions.into_iter().filter(|v| *v != u32::MAX).max() - }); - - // We had success calling the above API, so we expect the "modern" metadata API to work. 
- if let Some(version_to_get) = version_to_get { - let version_bytes = version_to_get.encode(); - let rpc_response = rpc_methods - .archive_v1_call( - block_hash.into(), - "Metadata_metadata_at_version", - &version_bytes, - ) - .await - .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata { - block_hash: block_hash.to_string(), - reason: format!("Error calling Metadata_metadata_at_version: {e}"), - }) - .and_then(|res| match res { - ArchiveCallResult::Success(bytes) => Ok(bytes.0), - ArchiveCallResult::Error(e) => Err(OnlineClientAtBlockError::CannotGetMetadata { - block_hash: block_hash.to_string(), - reason: format!("Calling Metadata_metadata_at_version returned an error: {e}"), - }), - })?; - - // Option because we may have asked for a version that doesn't exist. Compact because we get back a Vec - // of the metadata bytes, and the Vec is preceded by it's compact encoded length. The actual bytes are then - // decoded as a `RuntimeMetadataPrefixed`, after this. - let (_, metadata) = , RuntimeMetadataPrefixed)>>::decode(&mut &rpc_response[..]) - .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata { - block_hash: block_hash.to_string(), - reason: format!("Error decoding response for Metadata_metadata_at_version: {e}"), - })? - .ok_or_else(|| OnlineClientAtBlockError::CannotGetMetadata { - block_hash: block_hash.to_string(), - reason: format!("No metadata returned for the latest version from Metadata_metadata_versions ({version_to_get})"), - })?; - - return Ok(metadata.1); - } - - // We didn't get a version from Metadata_metadata_versions, so fall back to the "old" API. 
- let metadata_bytes = rpc_methods - .archive_v1_call(block_hash.into(), "Metadata_metadata", &[]) - .await - .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata { - block_hash: block_hash.to_string(), - reason: format!("Error calling Metadata_metadata: {e}"), - }) - .and_then(|res| match res { - ArchiveCallResult::Success(bytes) => Ok(bytes.0), - ArchiveCallResult::Error(e) => Err(OnlineClientAtBlockError::CannotGetMetadata { - block_hash: block_hash.to_string(), - reason: format!("Calling Metadata_metadata returned an error: {e}"), - }), - })?; - - let (_, metadata) = <(Compact, RuntimeMetadataPrefixed)>::decode(&mut &metadata_bytes[..]) - .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata { - block_hash: block_hash.to_string(), - reason: format!("Error decoding response for Metadata_metadata: {e}"), - })?; - - Ok(metadata.1) -} diff --git a/historic/src/config.rs b/historic/src/config.rs deleted file mode 100644 index 3e9e9ddba9..0000000000 --- a/historic/src/config.rs +++ /dev/null @@ -1,56 +0,0 @@ -pub mod polkadot; -pub mod substrate; - -use scale_info_legacy::TypeRegistrySet; -use std::fmt::Display; -use std::sync::Arc; -use subxt_rpcs::RpcConfig; - -pub use polkadot::PolkadotConfig; -pub use substrate::SubstrateConfig; - -/// This represents the configuration needed for a specific chain. This includes -/// any hardcoded types we need to know about for that chain, as well as a means to -/// obtain historic types for that chain. -pub trait Config: RpcConfig { - /// The type of hashing used by the runtime. - type Hash: Clone - + Copy - + Display - + Into<::Hash> - + From<::Hash>; - - /// Return the spec version for a given block number, if available. - /// - /// The [`crate::client::OnlineClient`] will look this up on chain if it's not available here, - /// but the [`crate::client::OfflineClient`] will error if this is not available for the required block number. 
- fn spec_version_for_block_number(&self, block_number: u64) -> Option; - - /// Return the metadata for a given spec version, if available. - /// - /// The [`crate::client::OnlineClient`] will look this up on chain if it's not available here, and then - /// call [`Config::set_metadata_for_spec_version`] to give the configuration the opportunity to cache it. - /// The [`crate::client::OfflineClient`] will error if this is not available for the required spec version. - fn metadata_for_spec_version( - &self, - spec_version: u32, - ) -> Option>; - - /// Set some metadata for a given spec version. the [`crate::client::OnlineClient`] will call this if it has - /// to retrieve metadata from the chain, to give this the opportunity to cache it. The configuration can - /// do nothing if it prefers. - fn set_metadata_for_spec_version( - &self, - spec_version: u32, - metadata: Arc, - ); - - /// Return legacy types (ie types to use with Runtimes that return pre-V14 metadata) for a given spec version. - fn legacy_types_for_spec_version<'this>( - &'this self, - spec_version: u32, - ) -> TypeRegistrySet<'this>; - - /// Hash some bytes, for instance a block header or extrinsic, for this chain. - fn hash(s: &[u8]) -> ::Hash; -} diff --git a/historic/src/config/polkadot.rs b/historic/src/config/polkadot.rs deleted file mode 100644 index b42e5690f6..0000000000 --- a/historic/src/config/polkadot.rs +++ /dev/null @@ -1,88 +0,0 @@ -use super::Config; -use super::SubstrateConfig; -use scale_info_legacy::{ChainTypeRegistry, TypeRegistrySet}; -use std::sync::Arc; - -/// Configuration that's suitable for the Polkadot Relay Chain -pub struct PolkadotConfig(SubstrateConfig); - -impl PolkadotConfig { - /// Create a new PolkadotConfig. 
- pub fn new() -> Self { - let config = SubstrateConfig::new() - .set_legacy_types(frame_decode::legacy_types::polkadot::relay_chain()); - - // TODO: Set spec versions as well with known spec version changes, to speed - // up accessing historic blocks within the known ranges. For now, we just let - // the online client look these up on chain. - - Self(config) - } - - /// Set the metadata to be used for decoding blocks at the given spec versions. - pub fn set_metadata_for_spec_versions( - mut self, - ranges: impl Iterator, - ) -> Self { - self = Self(self.0.set_metadata_for_spec_versions(ranges)); - self - } - - /// Given an iterator of block ranges to spec version of the form `(start, end, spec_version)`, add them - /// to this configuration. - pub fn set_spec_version_for_block_ranges( - mut self, - ranges: impl Iterator, - ) -> Self { - self = Self(self.0.set_spec_version_for_block_ranges(ranges)); - self - } -} - -/// This hands back the legacy types for the Polkadot Relay Chain, which is what [`PolkadotConfig`] uses internally. 
-pub fn legacy_types() -> ChainTypeRegistry { - frame_decode::legacy_types::polkadot::relay_chain() -} - -impl Default for PolkadotConfig { - fn default() -> Self { - Self::new() - } -} - -impl Config for PolkadotConfig { - type Hash = ::Hash; - - fn legacy_types_for_spec_version(&'_ self, spec_version: u32) -> TypeRegistrySet<'_> { - self.0.legacy_types_for_spec_version(spec_version) - } - - fn spec_version_for_block_number(&self, block_number: u64) -> Option { - self.0.spec_version_for_block_number(block_number) - } - - fn metadata_for_spec_version( - &self, - spec_version: u32, - ) -> Option> { - self.0.metadata_for_spec_version(spec_version) - } - - fn set_metadata_for_spec_version( - &self, - spec_version: u32, - metadata: Arc, - ) { - self.0.set_metadata_for_spec_version(spec_version, metadata) - } - - fn hash(s: &[u8]) -> ::Hash { - SubstrateConfig::hash(s) - } -} - -impl subxt_rpcs::RpcConfig for PolkadotConfig { - type Hash = ::Hash; - type Header = ::Header; - type AccountId = ::AccountId; -} diff --git a/historic/src/config/substrate.rs b/historic/src/config/substrate.rs deleted file mode 100644 index 7516774071..0000000000 --- a/historic/src/config/substrate.rs +++ /dev/null @@ -1,129 +0,0 @@ -use super::Config; -use crate::utils::RangeMap; -use primitive_types::H256; -use scale_info_legacy::{ChainTypeRegistry, TypeRegistrySet}; -use std::collections::HashMap; -use std::sync::Arc; -use std::sync::Mutex; - -/// Configuration that's suitable for standard Substrate chains (ie those -/// that have not customized the block hash type). -pub struct SubstrateConfig { - legacy_types: ChainTypeRegistry, - spec_version_for_block_number: RangeMap, - metadata_for_spec_version: Mutex>>, -} - -impl SubstrateConfig { - /// Create a new SubstrateConfig with no legacy types. - /// - /// Without any further configuration, this will only work with - /// the [`crate::client::OnlineClient`] for blocks that were produced by Runtimes - /// that emit metadata V14 or later. 
- /// - /// To support working at any block with the [`crate::client::OnlineClient`], you - /// must call [`SubstrateConfig::set_legacy_types`] with appropriate legacy type - /// definitions. - /// - /// To support working with the [`crate::client::OfflineClient`] at any block, - /// you must also call: - /// - [`SubstrateConfig::set_metadata_for_spec_versions`] to set the metadata to - /// use at each spec version we might encounter. - /// - [`SubstrateConfig::set_spec_version_for_block_ranges`] to set the spec version - /// to use for each range of blocks we might encounter. - pub fn new() -> Self { - Self { - legacy_types: ChainTypeRegistry::empty(), - spec_version_for_block_number: RangeMap::empty(), - metadata_for_spec_version: Mutex::new(HashMap::new()), - } - } - - /// Set the legacy types to use for this configuration. This enables support for - /// blocks produced by Runtimes that emit metadata older than V14. - pub fn set_legacy_types(mut self, legacy_types: ChainTypeRegistry) -> Self { - self.legacy_types = legacy_types; - self - } - - /// Set the metadata to be used for decoding blocks at the given spec versions. - pub fn set_metadata_for_spec_versions( - self, - ranges: impl Iterator, - ) -> Self { - let mut map = self.metadata_for_spec_version.lock().unwrap(); - for (spec_version, metadata) in ranges { - map.insert(spec_version, Arc::new(metadata)); - } - drop(map); - self - } - - /// Given an iterator of block ranges to spec version of the form `(start, end, spec_version)`, add them - /// to this configuration. 
- pub fn set_spec_version_for_block_ranges( - mut self, - ranges: impl Iterator, - ) -> Self { - let mut m = RangeMap::builder(); - for (start, end, spec_version) in ranges { - m = m.add_range(start, end, spec_version); - } - self.spec_version_for_block_number = m.build(); - self - } -} - -impl Default for SubstrateConfig { - fn default() -> Self { - Self::new() - } -} - -impl Config for SubstrateConfig { - type Hash = H256; - - fn legacy_types_for_spec_version(&'_ self, spec_version: u32) -> TypeRegistrySet<'_> { - self.legacy_types.for_spec_version(spec_version as u64) - } - - fn spec_version_for_block_number(&self, block_number: u64) -> Option { - self.spec_version_for_block_number - .get(block_number) - .copied() - } - - fn metadata_for_spec_version( - &self, - spec_version: u32, - ) -> Option> { - self.metadata_for_spec_version - .lock() - .unwrap() - .get(&spec_version) - .cloned() - } - - fn set_metadata_for_spec_version( - &self, - spec_version: u32, - metadata: Arc, - ) { - self.metadata_for_spec_version - .lock() - .unwrap() - .insert(spec_version, metadata); - } - - fn hash(s: &[u8]) -> ::Hash { - sp_crypto_hashing::blake2_256(s).into() - } -} - -impl subxt_rpcs::RpcConfig for SubstrateConfig { - type Hash = ::Hash; - // We don't use these types in any of the RPC methods we call, - // so don't bother setting them up: - type Header = (); - type AccountId = (); -} diff --git a/historic/src/error.rs b/historic/src/error.rs deleted file mode 100644 index d84f534ffb..0000000000 --- a/historic/src/error.rs +++ /dev/null @@ -1,325 +0,0 @@ -/// Any error emitted by this crate can convert into this. -// Dev Note: All errors here are transparent, because in many places -// the inner errors are returned and so need to provide enough context -// as-is, so there shouldn't be anything to add here. 
-#[allow(missing_docs)] -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -pub enum Error { - #[error(transparent)] - OnlineClientError(#[from] OnlineClientError), - #[error(transparent)] - OfflineClientAtBlockError(#[from] OfflineClientAtBlockError), - #[error(transparent)] - OnlineClientAtBlockError(#[from] OnlineClientAtBlockError), - #[error(transparent)] - ExtrinsicsError(#[from] ExtrinsicsError), - #[error(transparent)] - ExtrinsicTransactionExtensionError(#[from] ExtrinsicTransactionExtensionError), - #[error(transparent)] - ExtrinsicCallError(#[from] ExtrinsicCallError), - #[error(transparent)] - StorageError(#[from] StorageError), - #[error(transparent)] - StorageKeyError(#[from] StorageKeyError), - #[error(transparent)] - StorageValueError(#[from] StorageValueError), -} - -/// Errors constructing an online client. -#[allow(missing_docs)] -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -pub enum OnlineClientError { - #[error("Cannot construct OnlineClient: The URL provided is invalid: {url}")] - InvalidUrl { - /// The URL that was invalid. - url: String, - }, - #[error("Cannot construct OnlineClient owing to an RPC client error: {0}")] - RpcClientError(#[from] subxt_rpcs::Error), -} - -/// Errors constructing an offline client at a specific block number. -#[allow(missing_docs)] -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -pub enum OfflineClientAtBlockError { - #[error( - "Cannot construct OfflineClientAtBlock: spec version not found for block number {block_number}" - )] - SpecVersionNotFound { - /// The block number for which the spec version was not found. - block_number: u64, - }, - #[error( - "Cannot construct OfflineClientAtBlock: metadata not found for spec version {spec_version}" - )] - MetadataNotFound { - /// The spec version for which the metadata was not found. - spec_version: u32, - }, -} - -/// Errors constructing an online client at a specific block number. 
-#[allow(missing_docs)] -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -pub enum OnlineClientAtBlockError { - #[error( - "Cannot construct OnlineClientAtBlock: failed to get block hash from node for block {block_number}: {reason}" - )] - CannotGetBlockHash { - /// Block number we failed to get the hash for. - block_number: u64, - /// The error we encountered. - reason: subxt_rpcs::Error, - }, - #[error("Cannot construct OnlineClientAtBlock: block number {block_number} not found")] - BlockNotFound { - /// The block number for which a block was not found. - block_number: u64, - }, - #[error( - "Cannot construct OnlineClientAtBlock: failed to get spec version for block hash {block_hash}: {reason}" - )] - CannotGetSpecVersion { - /// The block hash for which we failed to get the spec version. - block_hash: String, - /// The error we encountered. - reason: String, - }, - #[error( - "Cannot construct OnlineClientAtBlock: failed to get metadata for block hash {block_hash}: {reason}" - )] - CannotGetMetadata { - /// The block hash for which we failed to get the metadata. - block_hash: String, - /// The error we encountered. - reason: String, - }, - #[error( - "Cannot inject types from metadata: failure to parse a type found in the metadata: {parse_error}" - )] - CannotInjectMetadataTypes { - /// Error parsing a type found in the metadata. - parse_error: scale_info_legacy::lookup_name::ParseError, - }, -} - -/// Errors working with extrinsics. -#[allow(missing_docs)] -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -pub enum ExtrinsicsError { - #[error("Could not fetch extrinsics: {reason}")] - FetchError { - /// The error that occurred while fetching the extrinsics. - reason: subxt_rpcs::Error, - }, - #[error("Could not decode extrinsic at index {index}: {reason}")] - DecodeError { - /// The extrinsic index that failed to decode. - index: usize, - /// The error that occurred during decoding. 
- reason: frame_decode::extrinsics::ExtrinsicDecodeError, - }, - #[error( - "Could not decode extrinsic at index {index}: there were undecoded bytes at the end, which implies that we did not decode it properly" - )] - LeftoverBytes { - /// The extrinsic index that had leftover bytes - index: usize, - /// The bytes that were left over after decoding the extrinsic. - leftover_bytes: Vec, - }, - #[error("Could not decode extrinsics: Unsupported metadata version ({version})")] - UnsupportedMetadataVersion { - /// The metadata version that is not supported. - version: u32, - }, -} - -#[allow(missing_docs)] -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -pub enum ExtrinsicTransactionExtensionError { - #[error("Could not decode extrinsic transaction extensions: {reason}")] - AllDecodeError { - /// The error that occurred while decoding the transaction extensions. - reason: scale_decode::Error, - }, - #[error( - "Could not decode extrinsic transaction extensions: there were undecoded bytes at the end, which implies that we did not decode it properly" - )] - AllLeftoverBytes { - /// The bytes that were left over after decoding the transaction extensions. - leftover_bytes: Vec, - }, - #[error("Could not decode extrinsic transaction extension {name}: {reason}")] - DecodeError { - /// The name of the transaction extension that failed to decode. - name: String, - /// The error that occurred during decoding. - reason: scale_decode::Error, - }, - #[error( - "Could not decode extrinsic transaction extension {name}: there were undecoded bytes at the end, which implies that we did not decode it properly" - )] - LeftoverBytes { - /// The name of the transaction extension that had leftover bytes. - name: String, - /// The bytes that were left over after decoding the transaction extension. 
- leftover_bytes: Vec, - }, -} - -#[allow(missing_docs)] -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -pub enum ExtrinsicCallError { - #[error("Could not decode the fields in extrinsic call: {reason}")] - FieldsDecodeError { - /// The error that occurred while decoding the fields of the extrinsic call. - reason: scale_decode::Error, - }, - #[error( - "Could not decode the fields in extrinsic call: there were undecoded bytes at the end, which implies that we did not decode it properly" - )] - FieldsLeftoverBytes { - /// The bytes that were left over after decoding the extrinsic call. - leftover_bytes: Vec, - }, - #[error("Could not decode field {name} in extrinsic call: {reason}")] - FieldDecodeError { - /// The name of the field that failed to decode. - name: String, - /// The error that occurred during decoding. - reason: scale_decode::Error, - }, - #[error( - "Could not decode field {name} in extrinsic call: there were undecoded bytes at the end, which implies that we did not decode it properly" - )] - FieldLeftoverBytes { - /// The name of the field that had leftover bytes. - name: String, - /// The bytes that were left over after decoding the extrinsic call. - leftover_bytes: Vec, - }, -} - -#[allow(missing_docs)] -#[derive(Debug, thiserror::Error)] -#[error("Storage entry is not a map: pallet {pallet_name}, storage {entry_name}")] -pub struct StorageEntryIsNotAMap { - /// The pallet containing the storage entry that was not found. - pub pallet_name: String, - /// The storage entry that was not found. - pub entry_name: String, -} - -#[allow(missing_docs)] -#[derive(Debug, thiserror::Error)] -#[error("Storage entry is not a plain value: pallet {pallet_name}, storage {entry_name}")] -pub struct StorageEntryIsNotAPlainValue { - /// The pallet containing the storage entry that was not found. - pub pallet_name: String, - /// The storage entry that was not found. 
- pub entry_name: String, -} - -#[allow(missing_docs)] -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -pub enum StorageError { - #[error("RPC error interacting with storage APIs: {reason}")] - RpcError { - /// The error that occurred while fetching the storage entry. - reason: subxt_rpcs::Error, - }, - #[error("Could not fetch next entry from storage subscription: {reason}")] - StorageEventError { - /// The error that occurred while fetching the next storage entry. - reason: String, - }, - #[error("Could not construct storage key: {reason}")] - KeyEncodeError { - /// The error that occurred while constructing the storage key. - reason: frame_decode::storage::StorageKeyEncodeError, - }, - #[error( - "Wrong number of keys provided to fetch a value: expected {num_keys_expected} keys, but got {num_keys_provided}" - )] - WrongNumberOfKeysProvidedForFetch { - /// The number of keys that were provided. - num_keys_provided: usize, - /// The number of keys expected. - num_keys_expected: usize, - }, - #[error( - "too many keys were provided to iterate over a storage entry: expected at most {max_keys_expected} keys, but got {num_keys_provided}" - )] - TooManyKeysProvidedForIter { - /// The number of keys that were provided. - num_keys_provided: usize, - /// The maximum number of keys that we expect. - max_keys_expected: usize, - }, - #[error( - "Could not extract storage information from metadata: Unsupported metadata version ({version})" - )] - UnsupportedMetadataVersion { - /// The metadata version that is not supported. - version: u32, - }, - #[error("Could not extract storage information from metadata: {reason}")] - ExtractStorageInfoError { - /// The error that occurred while extracting storage information from the metadata. 
- reason: frame_decode::storage::StorageInfoError<'static>, - }, -} - -#[allow(missing_docs)] -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -pub enum StorageKeyError { - #[error("Could not decode the storage key: {reason}")] - DecodeError { - /// The error that occurred while decoding the storage key information. - reason: frame_decode::storage::StorageKeyDecodeError, - }, - #[error( - "Could not decode the storage key: there were undecoded bytes at the end, which implies that we did not decode it properly" - )] - LeftoverBytes { - /// The bytes that were left over after decoding the storage key. - leftover_bytes: Vec, - }, - #[error("Could not decode the part of the storage key at index {index}: {reason}")] - DecodePartError { - index: usize, - reason: scale_decode::Error, - }, - #[error("Could not decode values out of the storage key: {reason}")] - DecodeKeyValueError { - reason: frame_decode::storage::StorageKeyValueDecodeError, - }, -} - -#[allow(missing_docs)] -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -pub enum StorageValueError { - #[error("Could not decode storage value: {reason}")] - DecodeError { - /// The error that occurred while decoding the storage value. - reason: scale_decode::Error, - }, - #[error( - "Could not decode storage value: there were undecoded bytes at the end, which implies that we did not decode it properly" - )] - LeftoverBytes { - /// The bytes that were left over after decoding the storage value. 
- leftover_bytes: Vec, - }, -} diff --git a/historic/src/extrinsics.rs b/historic/src/extrinsics.rs deleted file mode 100644 index 2bfc0e4a95..0000000000 --- a/historic/src/extrinsics.rs +++ /dev/null @@ -1,76 +0,0 @@ -use crate::client::{OfflineClientAtBlockT, OnlineClientAtBlockT}; -use crate::config::Config; -use crate::error::ExtrinsicsError; - -mod extrinsic_call; -mod extrinsic_info; -mod extrinsic_transaction_extensions; -mod extrinsics_type; - -pub use extrinsic_transaction_extensions::ExtrinsicTransactionParams; -pub use extrinsics_type::{Extrinsic, Extrinsics}; - -/// Work with extrinsics. -pub struct ExtrinsicsClient<'atblock, Client, T> { - client: &'atblock Client, - marker: std::marker::PhantomData, -} - -impl<'atblock, Client, T> ExtrinsicsClient<'atblock, Client, T> { - /// Work with extrinsics. - pub(crate) fn new(client: &'atblock Client) -> Self { - Self { - client, - marker: std::marker::PhantomData, - } - } -} - -// Things that we can do online with extrinsics. -impl<'atblock, 'client: 'atblock, Client, T> ExtrinsicsClient<'atblock, Client, T> -where - T: Config + 'client, - Client: OnlineClientAtBlockT<'client, T>, -{ - /// Fetch the extrinsics for the current block. This is essentially a - /// combination of [`Self::fetch_bytes`] and [`Self::decode_from`]. - pub async fn fetch(&self) -> Result, ExtrinsicsError> { - let bytes: Vec> = self.fetch_bytes().await?; - - // Small optimization; no need to decode anything if no bytes. - if bytes.is_empty() { - return Ok(Extrinsics::empty()); - } - - self.decode_from(bytes) - } - - /// Fetch the bytes for the extrinsics in the current block. - pub async fn fetch_bytes(&self) -> Result>, ExtrinsicsError> { - let bytes: Vec> = self - .client - .rpc_methods() - .archive_v1_body(self.client.block_hash().into()) - .await - .map_err(|e| ExtrinsicsError::FetchError { reason: e })? 
- .map(|body| body.into_iter().map(|b| b.0).collect()) - .unwrap_or_default(); - - Ok(bytes) - } -} - -// Things that we can do offline with extrinsics. -impl<'atblock, 'client: 'atblock, Client, T> ExtrinsicsClient<'atblock, Client, T> -where - T: Config + 'client, - Client: OfflineClientAtBlockT<'client, T>, -{ - /// Given some bytes representing the extrinsics in this block, decode them into an [`Extrinsics`] type. - pub fn decode_from( - &self, - bytes: Vec>, - ) -> Result, ExtrinsicsError> { - Extrinsics::new(bytes, self.client) - } -} diff --git a/historic/src/extrinsics/extrinsic_call.rs b/historic/src/extrinsics/extrinsic_call.rs deleted file mode 100644 index 83f346c0b9..0000000000 --- a/historic/src/extrinsics/extrinsic_call.rs +++ /dev/null @@ -1,210 +0,0 @@ -use super::extrinsic_info::{AnyExtrinsicInfo, with_info}; -use crate::error::ExtrinsicCallError; -use crate::utils::Either; -use crate::utils::{AnyResolver, AnyTypeId}; -use scale_info_legacy::{LookupName, TypeRegistrySet}; - -/// This represents the call data in the extrinsic. -pub struct ExtrinsicCall<'extrinsics, 'atblock> { - all_bytes: &'extrinsics [u8], - info: &'extrinsics AnyExtrinsicInfo<'atblock>, -} - -impl<'extrinsics, 'atblock> ExtrinsicCall<'extrinsics, 'atblock> { - pub(crate) fn new( - all_bytes: &'extrinsics [u8], - info: &'extrinsics AnyExtrinsicInfo<'atblock>, - ) -> Self { - Self { all_bytes, info } - } - - /// The index of the pallet that this call is for - pub fn pallet_index(&self) -> u8 { - with_info!(&self.info => info.info.pallet_index()) - } - - /// The name of the pallet that this call is for. - pub fn pallet_name(&self) -> &str { - with_info!(&self.info => info.info.pallet_name()) - } - - /// The index of this call. - pub fn index(&self) -> u8 { - with_info!(&self.info => info.info.call_index()) - } - - /// The name of this call. 
- pub fn name(&self) -> &str { - with_info!(&self.info => info.info.call_name()) - } - - /// Get the raw bytes for the entire call, which includes the pallet and call index - /// bytes as well as the encoded arguments for each of the fields. - pub fn bytes(&self) -> &'extrinsics [u8] { - with_info!(&self.info => &self.all_bytes[info.info.call_data_range()]) - } - - /// Work with the fields in this call. - pub fn fields(&self) -> ExtrinsicCallFields<'extrinsics, 'atblock> { - ExtrinsicCallFields::new(self.all_bytes, self.info) - } -} - -/// This represents the fields of the call. -pub struct ExtrinsicCallFields<'extrinsics, 'atblock> { - all_bytes: &'extrinsics [u8], - info: &'extrinsics AnyExtrinsicInfo<'atblock>, - resolver: AnyResolver<'atblock, 'atblock>, -} - -impl<'extrinsics, 'atblock> ExtrinsicCallFields<'extrinsics, 'atblock> { - pub(crate) fn new( - all_bytes: &'extrinsics [u8], - info: &'extrinsics AnyExtrinsicInfo<'atblock>, - ) -> Self { - let resolver = match info { - AnyExtrinsicInfo::Legacy(info) => AnyResolver::B(info.resolver), - AnyExtrinsicInfo::Current(info) => AnyResolver::A(info.resolver), - }; - - Self { - all_bytes, - info, - resolver, - } - } - - /// Return the bytes representing the fields stored in this extrinsic. - /// - /// # Note - /// - /// This is a subset of [`ExtrinsicCall::bytes`] that does not include the - /// first two bytes that denote the pallet index and the variant index. - pub fn bytes(&self) -> &'extrinsics [u8] { - with_info!(&self.info => &self.all_bytes[info.info.call_data_args_range()]) - } - - /// Iterate over each of the fields of the extrinsic call data. 
- pub fn iter(&self) -> impl Iterator> { - match &self.info { - AnyExtrinsicInfo::Legacy(info) => { - Either::A(info.info.call_data().map(|named_arg| ExtrinsicCallField { - field_bytes: &self.all_bytes[named_arg.range()], - resolver: &self.resolver, - info: AnyExtrinsicCallFieldInfo::Legacy(ExtrinsicCallFieldInfo { - info: named_arg, - resolver: info.resolver, - }), - })) - } - AnyExtrinsicInfo::Current(info) => { - Either::B(info.info.call_data().map(|named_arg| ExtrinsicCallField { - field_bytes: &self.all_bytes[named_arg.range()], - resolver: &self.resolver, - info: AnyExtrinsicCallFieldInfo::Current(ExtrinsicCallFieldInfo { - info: named_arg, - resolver: info.resolver, - }), - })) - } - } - } - - /// Attempt to decode the fields into the given type. - pub fn decode_as(&self) -> Result { - with_info!(&self.info => { - let cursor = &mut self.bytes(); - let mut fields = &mut info.info.call_data().map(|named_arg| { - scale_decode::Field::new(named_arg.ty().clone(), Some(named_arg.name())) - }); - - let decoded = T::decode_as_fields(cursor, &mut fields, info.resolver) - .map_err(|e| ExtrinsicCallError::FieldsDecodeError { reason: e })?; - - if !cursor.is_empty() { - return Err(ExtrinsicCallError::FieldsLeftoverBytes { - leftover_bytes: cursor.to_vec(), - }) - } - - Ok(decoded) - }) - } -} - -pub struct ExtrinsicCallField<'fields, 'extrinsics, 'atblock> { - field_bytes: &'extrinsics [u8], - info: AnyExtrinsicCallFieldInfo<'extrinsics, 'atblock>, - resolver: &'fields AnyResolver<'atblock, 'atblock>, -} - -enum AnyExtrinsicCallFieldInfo<'extrinsics, 'atblock> { - Legacy(ExtrinsicCallFieldInfo<'extrinsics, 'atblock, LookupName, TypeRegistrySet<'atblock>>), - Current(ExtrinsicCallFieldInfo<'extrinsics, 'atblock, u32, scale_info::PortableRegistry>), -} - -struct ExtrinsicCallFieldInfo<'extrinsics, 'atblock, TypeId, Resolver> { - info: &'extrinsics frame_decode::extrinsics::NamedArg<'atblock, TypeId>, - resolver: &'atblock Resolver, -} - -macro_rules! 
with_call_field_info { - (&$self:ident.$info:ident => $fn:expr) => { - #[allow(clippy::clone_on_copy)] - match &$self.$info { - AnyExtrinsicCallFieldInfo::Legacy($info) => $fn, - AnyExtrinsicCallFieldInfo::Current($info) => $fn, - } - }; -} - -impl<'fields, 'extrinsics, 'atblock> ExtrinsicCallField<'fields, 'extrinsics, 'atblock> { - /// Get the raw bytes for this field. - pub fn bytes(&self) -> &'extrinsics [u8] { - self.field_bytes - } - - /// Get the name of this field. - pub fn name(&self) -> &'extrinsics str { - with_call_field_info!(&self.info => info.info.name()) - } - - /// Visit the given field with a [`scale_decode::visitor::Visitor`]. This is like a lower level - /// version of [`ExtrinsicCallField::decode_as`], as the visitor is able to preserve lifetimes - /// and has access to more type information than is available via [`ExtrinsicCallField::decode_as`]. - pub fn visit< - V: scale_decode::visitor::Visitor>, - >( - &self, - visitor: V, - ) -> Result, V::Error> { - let type_id = match &self.info { - AnyExtrinsicCallFieldInfo::Current(info) => AnyTypeId::A(*info.info.ty()), - AnyExtrinsicCallFieldInfo::Legacy(info) => AnyTypeId::B(info.info.ty().clone()), - }; - let cursor = &mut self.bytes(); - - scale_decode::visitor::decode_with_visitor(cursor, type_id, self.resolver, visitor) - } - - /// Attempt to decode the value of this field into the given type. 
- pub fn decode_as(&self) -> Result { - with_call_field_info!(&self.info => { - let cursor = &mut &*self.field_bytes; - let decoded = T::decode_as_type(cursor, info.info.ty().clone(), info.resolver) - .map_err(|e| ExtrinsicCallError::FieldDecodeError { - name: info.info.name().to_string(), - reason: e, - })?; - - if !cursor.is_empty() { - return Err(ExtrinsicCallError::FieldLeftoverBytes { - name: info.info.name().to_string(), - leftover_bytes: cursor.to_vec(), - }); - } - - Ok(decoded) - }) - } -} diff --git a/historic/src/extrinsics/extrinsic_info.rs b/historic/src/extrinsics/extrinsic_info.rs deleted file mode 100644 index 6207b80ead..0000000000 --- a/historic/src/extrinsics/extrinsic_info.rs +++ /dev/null @@ -1,109 +0,0 @@ -use crate::error::ExtrinsicsError; -use frame_metadata::RuntimeMetadata; -use scale_info_legacy::{LookupName, TypeRegistrySet}; - -// Extrinsic information for modern or legacy extrinsics. -#[allow(clippy::large_enum_variant)] -pub enum AnyExtrinsicInfo<'atblock> { - Legacy(ExtrinsicInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>), - Current(ExtrinsicInfo<'atblock, u32, scale_info::PortableRegistry>), -} - -impl<'atblock> AnyExtrinsicInfo<'atblock> { - /// For a slice of extrinsics, return a vec of information about each one. 
- pub fn new( - bytes: &[Vec], - metadata: &'atblock RuntimeMetadata, - legacy_types: &'atblock TypeRegistrySet<'atblock>, - ) -> Result, ExtrinsicsError> { - let infos = match metadata { - RuntimeMetadata::V8(m) => extrinsic_info_inner(bytes, m, legacy_types), - RuntimeMetadata::V9(m) => extrinsic_info_inner(bytes, m, legacy_types), - RuntimeMetadata::V10(m) => extrinsic_info_inner(bytes, m, legacy_types), - RuntimeMetadata::V11(m) => extrinsic_info_inner(bytes, m, legacy_types), - RuntimeMetadata::V12(m) => extrinsic_info_inner(bytes, m, legacy_types), - RuntimeMetadata::V13(m) => extrinsic_info_inner(bytes, m, legacy_types), - RuntimeMetadata::V14(m) => extrinsic_info_inner(bytes, m, &m.types), - RuntimeMetadata::V15(m) => extrinsic_info_inner(bytes, m, &m.types), - RuntimeMetadata::V16(m) => extrinsic_info_inner(bytes, m, &m.types), - unknown => { - return Err(ExtrinsicsError::UnsupportedMetadataVersion { - version: unknown.version(), - }); - } - }?; - - fn extrinsic_info_inner<'atblock, Info, Resolver>( - bytes: &[Vec], - args_info: &'atblock Info, - type_resolver: &'atblock Resolver, - ) -> Result>, ExtrinsicsError> - where - Info: frame_decode::extrinsics::ExtrinsicTypeInfo, - Info::TypeId: Clone + core::fmt::Display + core::fmt::Debug + Send + Sync + 'static, - Resolver: scale_type_resolver::TypeResolver, - AnyExtrinsicInfo<'atblock>: From>, - { - bytes - .iter() - .enumerate() - .map(|(index, bytes)| { - let cursor = &mut &**bytes; - let extrinsic_info = frame_decode::extrinsics::decode_extrinsic( - cursor, - args_info, - type_resolver, - ) - .map_err(|reason| ExtrinsicsError::DecodeError { index, reason })?; - - if !cursor.is_empty() { - return Err(ExtrinsicsError::LeftoverBytes { - index, - leftover_bytes: cursor.to_vec(), - }); - } - - Ok(ExtrinsicInfo { - info: extrinsic_info, - resolver: type_resolver, - } - .into()) - }) - .collect() - } - - Ok(infos) - } -} - -impl<'atblock> From>> - for AnyExtrinsicInfo<'atblock> -{ - fn from(info: 
ExtrinsicInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>) -> Self { - AnyExtrinsicInfo::Legacy(info) - } -} -impl<'atblock> From> - for AnyExtrinsicInfo<'atblock> -{ - fn from(info: ExtrinsicInfo<'atblock, u32, scale_info::PortableRegistry>) -> Self { - AnyExtrinsicInfo::Current(info) - } -} - -// Extrinsic information for a specific type ID and resolver type. -pub struct ExtrinsicInfo<'atblock, TypeId, Resolver> { - pub info: frame_decode::extrinsics::Extrinsic<'atblock, TypeId>, - pub resolver: &'atblock Resolver, -} - -macro_rules! with_info { - (&$self:ident.$info:ident => $fn:expr) => { - #[allow(clippy::clone_on_copy)] - match &$self.$info { - AnyExtrinsicInfo::Legacy($info) => $fn, - AnyExtrinsicInfo::Current($info) => $fn, - } - }; -} -pub(crate) use with_info; diff --git a/historic/src/extrinsics/extrinsic_transaction_extensions.rs b/historic/src/extrinsics/extrinsic_transaction_extensions.rs deleted file mode 100644 index 3f36010251..0000000000 --- a/historic/src/extrinsics/extrinsic_transaction_extensions.rs +++ /dev/null @@ -1,213 +0,0 @@ -use super::extrinsic_info::AnyExtrinsicInfo; -use crate::error::ExtrinsicTransactionExtensionError; -use crate::utils::Either; -use frame_decode::helpers::scale_decode; -use scale_info_legacy::{LookupName, TypeRegistrySet}; - -// Extrinsic extensions information for modern or legacy extrinsics. -enum AnyExtrinsicExtensionsInfo<'extrinsics, 'atblock> { - Legacy(ExtrinsicExtensionsInfo<'extrinsics, 'atblock, LookupName, TypeRegistrySet<'atblock>>), - Current(ExtrinsicExtensionsInfo<'extrinsics, 'atblock, u32, scale_info::PortableRegistry>), -} - -struct ExtrinsicExtensionsInfo<'extrinsics, 'atblock, TypeId, Resolver> { - info: &'extrinsics frame_decode::extrinsics::ExtrinsicExtensions<'atblock, TypeId>, - resolver: &'atblock Resolver, -} - -/// This represents the transaction extensions of an extrinsic. 
-pub struct ExtrinsicTransactionParams<'extrinsics, 'atblock> { - all_bytes: &'extrinsics [u8], - info: AnyExtrinsicExtensionsInfo<'extrinsics, 'atblock>, -} - -macro_rules! with_extensions_info { - (&$self:ident.$info:ident => $fn:expr) => { - #[allow(clippy::clone_on_copy)] - match &$self.$info { - AnyExtrinsicExtensionsInfo::Legacy($info) => $fn, - AnyExtrinsicExtensionsInfo::Current($info) => $fn, - } - }; -} - -impl<'extrinsics, 'atblock> ExtrinsicTransactionParams<'extrinsics, 'atblock> { - pub(crate) fn new( - all_bytes: &'extrinsics [u8], - info: &'extrinsics AnyExtrinsicInfo<'atblock>, - ) -> Option { - match info { - AnyExtrinsicInfo::Current(info) => { - let extension_info = info.info.transaction_extension_payload()?; - Some(Self { - all_bytes, - info: AnyExtrinsicExtensionsInfo::Current(ExtrinsicExtensionsInfo { - info: extension_info, - resolver: info.resolver, - }), - }) - } - AnyExtrinsicInfo::Legacy(info) => { - let extension_info = info.info.transaction_extension_payload()?; - Some(Self { - all_bytes, - info: AnyExtrinsicExtensionsInfo::Legacy(ExtrinsicExtensionsInfo { - info: extension_info, - resolver: info.resolver, - }), - }) - } - } - } - - /// Get the raw bytes for all of the transaction extensions. - pub fn bytes(&self) -> &'extrinsics [u8] { - with_extensions_info!(&self.info => &self.all_bytes[info.info.range()]) - } - - /// iterate over each of the transaction extensions in this extrinsic. 
- pub fn iter( - &self, - ) -> impl Iterator> { - match &self.info { - AnyExtrinsicExtensionsInfo::Legacy(extension_info) => { - let iter = extension_info - .info - .iter() - .map(|s| ExtrinsicTransactionExtension { - bytes: &self.all_bytes[s.range()], - info: ExtrinsicExtensionInfo { - name: s.name(), - type_id: s.ty(), - resolver: extension_info.resolver, - } - .into(), - }); - Either::A(iter) - } - AnyExtrinsicExtensionsInfo::Current(extension_info) => { - let iter = extension_info - .info - .iter() - .map(|s| ExtrinsicTransactionExtension { - bytes: &self.all_bytes[s.range()], - info: ExtrinsicExtensionInfo { - name: s.name(), - type_id: s.ty(), - resolver: extension_info.resolver, - } - .into(), - }); - Either::B(iter) - } - } - } - - /// Attempt to decode the transaction extensions into a type where each field name is the name of the transaction - /// extension and the field value is the decoded extension. - pub fn decode_as( - &self, - ) -> Result { - with_extensions_info!(&self.info => { - let cursor = &mut self.bytes(); - let mut fields = &mut info.info.iter().map(|named_arg| { - scale_decode::Field::new(named_arg.ty().clone(), Some(named_arg.name())) - }); - - let decoded = T::decode_as_fields(cursor, &mut fields, info.resolver) - .map_err(|e| ExtrinsicTransactionExtensionError::AllDecodeError { reason: e })?; - - if !cursor.is_empty() { - return Err(ExtrinsicTransactionExtensionError::AllLeftoverBytes { - leftover_bytes: cursor.to_vec(), - }) - } - - Ok(decoded) - }) - } -} - -// Extrinsic single extension information for modern or legacy extrinsics. 
-enum AnyExtrinsicExtensionInfo<'extrinsics, 'atblock> { - Legacy(ExtrinsicExtensionInfo<'extrinsics, 'atblock, LookupName, TypeRegistrySet<'atblock>>), - Current(ExtrinsicExtensionInfo<'extrinsics, 'atblock, u32, scale_info::PortableRegistry>), -} - -impl<'extrinsics, 'atblock> - From>> - for AnyExtrinsicExtensionInfo<'extrinsics, 'atblock> -{ - fn from( - info: ExtrinsicExtensionInfo<'extrinsics, 'atblock, LookupName, TypeRegistrySet<'atblock>>, - ) -> Self { - AnyExtrinsicExtensionInfo::Legacy(info) - } -} -impl<'extrinsics, 'atblock> - From> - for AnyExtrinsicExtensionInfo<'extrinsics, 'atblock> -{ - fn from( - info: ExtrinsicExtensionInfo<'extrinsics, 'atblock, u32, scale_info::PortableRegistry>, - ) -> Self { - AnyExtrinsicExtensionInfo::Current(info) - } -} - -struct ExtrinsicExtensionInfo<'extrinsics, 'atblock, TypeId, Resolver> { - name: &'extrinsics str, - type_id: &'extrinsics TypeId, - resolver: &'atblock Resolver, -} - -macro_rules! with_extension_info { - (&$self:ident.$info:ident => $fn:expr) => { - #[allow(clippy::clone_on_copy)] - match &$self.$info { - AnyExtrinsicExtensionInfo::Legacy($info) => $fn, - AnyExtrinsicExtensionInfo::Current($info) => $fn, - } - }; -} - -/// This represents a single transaction extension in an extrinsic. -pub struct ExtrinsicTransactionExtension<'extrinsics, 'atblock> { - bytes: &'extrinsics [u8], - info: AnyExtrinsicExtensionInfo<'extrinsics, 'atblock>, -} - -impl<'extrinsics, 'atblock> ExtrinsicTransactionExtension<'extrinsics, 'atblock> { - /// The bytes for this transaction extension. - pub fn bytes(&self) -> &'extrinsics [u8] { - self.bytes - } - - /// The name/identifier for this transaction extension. - pub fn name(&self) -> &'extrinsics str { - with_extension_info!(&self.info => info.name) - } - - /// Decode the bytes for this transaction extension into a type that implements `scale_decode::DecodeAsType`. 
- pub fn decode_as( - &self, - ) -> Result { - with_extension_info!(&self.info => { - let cursor = &mut &*self.bytes; - let decoded = T::decode_as_type(cursor, info.type_id.clone(), info.resolver) - .map_err(|reason| ExtrinsicTransactionExtensionError::DecodeError { - name: info.name.to_string(), - reason - })?; - - if !cursor.is_empty() { - return Err(ExtrinsicTransactionExtensionError::LeftoverBytes { - name: info.name.to_string(), - leftover_bytes: cursor.to_vec(), - }); - } - - Ok(decoded) - }) - } -} diff --git a/historic/src/extrinsics/extrinsics_type.rs b/historic/src/extrinsics/extrinsics_type.rs deleted file mode 100644 index 520b314759..0000000000 --- a/historic/src/extrinsics/extrinsics_type.rs +++ /dev/null @@ -1,112 +0,0 @@ -use super::extrinsic_call::ExtrinsicCall; -use super::extrinsic_info::{AnyExtrinsicInfo, with_info}; -use super::extrinsic_transaction_extensions::ExtrinsicTransactionParams; -use crate::client::OfflineClientAtBlockT; -use crate::config::Config; -use crate::error::ExtrinsicsError; - -/// This represents some extrinsics in a block, and carries everything that we need to decode information out of them. -pub struct Extrinsics<'atblock> { - bytes: Vec>, - // Each index in this vec should line up with one index in the above vec. - infos: Vec>, -} - -impl<'atblock> Extrinsics<'atblock> { - // In here we hide the messy logic needed to decode extrinsics into a consistent output given either current or legacy metadata. - pub(crate) fn new<'client: 'atblock, T, Client>( - bytes: Vec>, - client: &'atblock Client, - ) -> Result - where - T: Config + 'client, - Client: OfflineClientAtBlockT<'client, T>, - { - let infos = AnyExtrinsicInfo::new(&bytes, client.metadata(), client.legacy_types())?; - Ok(Extrinsics { bytes, infos }) - } - - pub(crate) fn empty() -> Self { - Self { - bytes: vec![], - infos: vec![], - } - } - - /// How many extrinsics are in this block? 
- pub fn len(&self) -> usize { - self.bytes.len() - } - - /// Are there any extrinsics in this block? - pub fn is_empty(&self) -> bool { - self.bytes.is_empty() - } - - /// Iterate over the extrinsics. - pub fn iter(&self) -> impl Iterator> { - self.bytes - .iter() - .zip(self.infos.iter()) - .enumerate() - .map(|(idx, (bytes, info))| Extrinsic { idx, bytes, info }) - } -} - -/// This represents an extrinsic, and carries everything that we need to decode information out of it. -pub struct Extrinsic<'extrinsics, 'atblock> { - idx: usize, - bytes: &'extrinsics [u8], - info: &'extrinsics AnyExtrinsicInfo<'atblock>, -} - -impl<'extrinsics, 'atblock> Extrinsic<'extrinsics, 'atblock> { - /// Get the index of this extrinsic in the block. - pub fn index(&self) -> usize { - self.idx - } - - /// Get the raw bytes of this extrinsic. - pub fn bytes(&self) -> &'extrinsics [u8] { - self.bytes - } - - /// Is this extrinsic signed? - pub fn is_signed(&self) -> bool { - with_info!(&self.info => info.info.is_signed()) - } - - /// Return information about the call that this extrinsic is making. - pub fn call(&self) -> ExtrinsicCall<'extrinsics, 'atblock> { - ExtrinsicCall::new(self.bytes, self.info) - } - - /// Return only the bytes of the address that signed this extrinsic. - /// - /// # Note - /// - /// Returns `None` if the extrinsic is not signed. - pub fn address_bytes(&self) -> Option<&'extrinsics [u8]> { - with_info!(&self.info => { - info.info - .signature_payload() - .map(|s| &self.bytes[s.address_range()]) - }) - } - - /// Returns Some(signature_bytes) if the extrinsic was signed otherwise None is returned. - pub fn signature_bytes(&self) -> Option<&'extrinsics [u8]> { - with_info!(&self.info => { - info.info - .signature_payload() - .map(|s| &self.bytes[s.signature_range()]) - }) - } - - /// Get information about the transaction extensions of this extrinsic. 
- pub fn transaction_extensions( - &self, - ) -> Option> { - ExtrinsicTransactionParams::new(self.bytes, self.info) - } -} diff --git a/historic/src/lib.rs b/historic/src/lib.rs deleted file mode 100644 index 8c15ab2e6f..0000000000 --- a/historic/src/lib.rs +++ /dev/null @@ -1,27 +0,0 @@ -//! `subxt-historic` is a library for working with non head-of-chain data on Substrate-based blockchains. - -// TODO: Remove this when we're ready to release, and document everything! -#![allow(missing_docs)] - -mod utils; - -pub mod client; -pub mod config; -pub mod error; -pub mod extrinsics; -pub mod storage; - -pub use client::{OfflineClient, OnlineClient}; -pub use config::polkadot::PolkadotConfig; -pub use config::substrate::SubstrateConfig; -pub use error::Error; - -/// External types and crates that may be useful. -pub mod ext { - pub use futures::stream::{Stream, StreamExt}; -} - -/// Helper types that could be useful. -pub mod helpers { - pub use crate::utils::{AnyResolver, AnyResolverError, AnyTypeId}; -} diff --git a/historic/src/storage.rs b/historic/src/storage.rs deleted file mode 100644 index cf09303c47..0000000000 --- a/historic/src/storage.rs +++ /dev/null @@ -1,350 +0,0 @@ -mod list_storage_entries_any; -mod storage_entry; -mod storage_info; -mod storage_key; -mod storage_value; - -use crate::client::{OfflineClientAtBlockT, OnlineClientAtBlockT}; -use crate::config::Config; -use crate::error::StorageError; -use crate::storage::storage_info::with_info; -use std::borrow::Cow; -use std::sync::Arc; -use storage_info::AnyStorageInfo; - -pub use storage_entry::StorageEntry; -pub use storage_key::{StorageHasher, StorageKey, StorageKeyPart}; -pub use storage_value::StorageValue; -// We take how storage keys can be passed in from `frame-decode`, so re-export here. -pub use frame_decode::storage::{EncodableValues, IntoEncodableValues}; - -/// Work with storage. 
-pub struct StorageClient<'atblock, Client, T> { - client: &'atblock Client, - marker: std::marker::PhantomData, -} - -impl<'atblock, Client, T> StorageClient<'atblock, Client, T> { - /// Work with storage. - pub(crate) fn new(client: &'atblock Client) -> Self { - Self { - client, - marker: std::marker::PhantomData, - } - } -} - -// Things that we can do offline with storage. -impl<'atblock, Client, T> StorageClient<'atblock, Client, T> -where - T: Config + 'atblock, - Client: OfflineClientAtBlockT<'atblock, T>, -{ - /// Select the storage entry you'd like to work with. - pub fn entry( - &self, - pallet_name: impl Into, - entry_name: impl Into, - ) -> Result, StorageError> { - let pallet_name = pallet_name.into(); - let entry_name = entry_name.into(); - - let storage_info = AnyStorageInfo::new( - &pallet_name, - &entry_name, - self.client.metadata(), - self.client.legacy_types(), - )?; - - Ok(StorageEntryClient { - client: self.client, - pallet_name, - entry_name, - info: Arc::new(storage_info), - marker: std::marker::PhantomData, - }) - } - - /// Iterate over all of the storage entries listed in the metadata for the current block. This does **not** include well known - /// storage entries like `:code` which are not listed in the metadata. - pub fn entries(&self) -> impl Iterator> { - let client = self.client; - let metadata = client.metadata(); - - let mut pallet_name = Cow::Borrowed(""); - list_storage_entries_any::list_storage_entries_any(metadata).filter_map(move |entry| { - match entry { - frame_decode::storage::StorageEntry::In(name) => { - // Set the pallet name for upcoming entries: - pallet_name = name; - None - } - frame_decode::storage::StorageEntry::Name(entry_name) => { - // Output each entry with the last seen pallet name: - Some(StorageEntriesItem { - pallet_name: pallet_name.clone(), - entry_name, - client: self.client, - marker: std::marker::PhantomData, - }) - } - } - }) - } -} - -/// Working with a specific storage entry. 
-pub struct StorageEntriesItem<'atblock, Client, T> { - pallet_name: Cow<'atblock, str>, - entry_name: Cow<'atblock, str>, - client: &'atblock Client, - marker: std::marker::PhantomData, -} - -impl<'atblock, Client, T> StorageEntriesItem<'atblock, Client, T> -where - T: Config + 'atblock, - Client: OfflineClientAtBlockT<'atblock, T>, -{ - /// The pallet name. - pub fn pallet_name(&self) -> &str { - &self.pallet_name - } - - /// The storage entry name. - pub fn entry_name(&self) -> &str { - &self.entry_name - } - - /// Extract the relevant storage information so that we can work with this entry. - pub fn entry(&self) -> Result, StorageError> { - StorageClient { - client: self.client, - marker: std::marker::PhantomData, - } - .entry(&*self.pallet_name, &*self.entry_name) - } -} - -/// A client for working with a specific storage entry. -pub struct StorageEntryClient<'atblock, Client, T> { - client: &'atblock Client, - pallet_name: String, - entry_name: String, - info: Arc>, - marker: std::marker::PhantomData, -} - -impl<'atblock, Client, T> StorageEntryClient<'atblock, Client, T> -where - T: Config + 'atblock, - Client: OfflineClientAtBlockT<'atblock, T>, -{ - /// Get the pallet name. - pub fn pallet_name(&self) -> &str { - &self.pallet_name - } - - /// Get the storage entry name. - pub fn entry_name(&self) -> &str { - &self.entry_name - } - - /// The key which points to this storage entry (but not necessarily any values within it). - pub fn key_prefix(&self) -> [u8; 32] { - let pallet_name = &*self.pallet_name; - let entry_name = &*self.entry_name; - - frame_decode::storage::encode_storage_key_prefix(pallet_name, entry_name) - } - - /// Return the default value for this storage entry, if there is one. Returns `None` if there - /// is no default value. 
- pub fn default_value(&self) -> Option> { - with_info!(info = &*self.info => { - info.info.default_value.as_ref().map(|default_value| { - StorageValue::new(self.info.clone(), default_value.clone()) - }) - }) - } -} - -impl<'atblock, Client, T> StorageEntryClient<'atblock, Client, T> -where - T: Config + 'atblock, - Client: OnlineClientAtBlockT<'atblock, T>, -{ - /// Fetch a specific key in this map. If the number of keys provided is not equal - /// to the number of keys required to fetch a single value from the map, then an error - /// will be emitted. If no value exists but there is a default value for this storage - /// entry, then the default value will be returned. Else, `None` will be returned. - pub async fn fetch( - &self, - keys: Keys, - ) -> Result>, StorageError> { - let expected_num_keys = with_info!(info = &*self.info => { - info.info.keys.len() - }); - - // For fetching, we need exactly as many keys as exist for a storage entry. - if expected_num_keys != keys.num_encodable_values() { - return Err(StorageError::WrongNumberOfKeysProvidedForFetch { - num_keys_provided: keys.num_encodable_values(), - num_keys_expected: expected_num_keys, - }); - } - - let key_bytes = self.key(keys)?; - let info = self.info.clone(); - let value = fetch(self.client, &key_bytes) - .await? - .map(|bytes| StorageValue::new(info, Cow::Owned(bytes))) - .or_else(|| self.default_value()); - - Ok(value) - } - - /// Iterate over the values underneath the provided keys. - pub async fn iter( - &self, - keys: Keys, - ) -> Result< - impl futures::Stream, StorageError>> - + Unpin - + use<'atblock, Client, T, Keys>, - StorageError, - > { - use futures::stream::StreamExt; - use subxt_rpcs::methods::chain_head::{ - ArchiveStorageEvent, ArchiveStorageQuery, StorageQueryType, - }; - - let expected_num_keys = with_info!(info = &*self.info => { - info.info.keys.len() - }); - - // For iterating, we need at most one less key than the number that exists for a storage entry. 
- // TODO: The error message will be confusing if == keys are provided! - if keys.num_encodable_values() >= expected_num_keys { - return Err(StorageError::TooManyKeysProvidedForIter { - num_keys_provided: keys.num_encodable_values(), - max_keys_expected: expected_num_keys - 1, - }); - } - - let block_hash = self.client.block_hash(); - let key_bytes = self.key(keys)?; - - let items = std::iter::once(ArchiveStorageQuery { - key: &*key_bytes, - query_type: StorageQueryType::DescendantsValues, - pagination_start_key: None, - }); - - let sub = self - .client - .rpc_methods() - .archive_v1_storage(block_hash.into(), items, None) - .await - .map_err(|e| StorageError::RpcError { reason: e })?; - - let info = self.info.clone(); - let sub = sub.filter_map(move |item| { - let info = info.clone(); - async move { - let item = match item { - Ok(ArchiveStorageEvent::Item(item)) => item, - Ok(ArchiveStorageEvent::Error(err)) => { - return Some(Err(StorageError::StorageEventError { reason: err.error })); - } - Ok(ArchiveStorageEvent::Done) => return None, - Err(e) => return Some(Err(StorageError::RpcError { reason: e })), - }; - - item.value - .map(|value| Ok(StorageEntry::new(info, item.key.0, Cow::Owned(value.0)))) - } - }); - - Ok(Box::pin(sub)) - } - - // Encode a storage key for this storage entry to bytes. The key can be a partial key - // (i.e there are still multiple values below it) or a complete key that points to a specific value. - // - // Dev note: We don't have any functions that can take an already-encoded key and fetch an entry from - // it yet, so we don't expose this. If we did expose it, we might want to return some struct that wraps - // the key bytes and some metadata about them. Or maybe just fetch_raw and iter_raw. 
- fn key(&self, keys: Keys) -> Result, StorageError> { - with_info!(info = &*self.info => { - let key_bytes = frame_decode::storage::encode_storage_key_with_info( - &self.pallet_name, - &self.entry_name, - keys, - &info.info, - info.resolver, - ).map_err(|e| StorageError::KeyEncodeError { reason: e })?; - Ok(key_bytes) - }) - } -} - -// Fetch a single storage value by its key. -async fn fetch<'atblock, Client, T>( - client: &Client, - key_bytes: &[u8], -) -> Result>, StorageError> -where - T: Config + 'atblock, - Client: OnlineClientAtBlockT<'atblock, T>, -{ - use subxt_rpcs::methods::chain_head::{ - ArchiveStorageEvent, ArchiveStorageQuery, StorageQueryType, - }; - - let query = ArchiveStorageQuery { - key: key_bytes, - query_type: StorageQueryType::Value, - pagination_start_key: None, - }; - - let mut response_stream = client - .rpc_methods() - .archive_v1_storage(client.block_hash().into(), std::iter::once(query), None) - .await - .map_err(|e| StorageError::RpcError { reason: e })?; - - let value = response_stream - .next() - .await - .transpose() - .map_err(|e| StorageError::RpcError { reason: e })?; - - // No value found. - let Some(value) = value else { - return Ok(None); - }; - - let item = match value { - ArchiveStorageEvent::Item(item) => item, - // if it errors, return the error: - ArchiveStorageEvent::Error(err) => { - return Err(StorageError::StorageEventError { reason: err.error }); - } - // if it's done, it means no value was returned: - ArchiveStorageEvent::Done => return Ok(None), - }; - - // This shouldn't happen, but if it does, the value we wanted wasn't found. - if item.key.0 != key_bytes { - return Ok(None); - } - - // The bytes for the storage value. If this is None, then the API is misbehaving, - // ot no matching value was found. 
- let Some(value_bytes) = item.value else { - return Ok(None); - }; - - Ok(Some(value_bytes.0)) -} diff --git a/historic/src/storage/list_storage_entries_any.rs b/historic/src/storage/list_storage_entries_any.rs deleted file mode 100644 index e97bafd748..0000000000 --- a/historic/src/storage/list_storage_entries_any.rs +++ /dev/null @@ -1,35 +0,0 @@ -use frame_decode::storage::StorageEntryInfo; -use frame_metadata::RuntimeMetadata; - -pub use frame_decode::storage::StorageEntry; - -/// Returns an iterator listing the available storage entries in some metadata. -/// -/// This basically calls [`StorageEntryInfo::storage_entries()`] for each metadata version, -/// returning an empty iterator where applicable (ie when passing legacy metadata and the -/// `legacy` features flag is not enabled). -pub fn list_storage_entries_any( - metadata: &RuntimeMetadata, -) -> impl Iterator> { - match metadata { - RuntimeMetadata::V0(_deprecated_metadata) - | RuntimeMetadata::V1(_deprecated_metadata) - | RuntimeMetadata::V2(_deprecated_metadata) - | RuntimeMetadata::V3(_deprecated_metadata) - | RuntimeMetadata::V4(_deprecated_metadata) - | RuntimeMetadata::V5(_deprecated_metadata) - | RuntimeMetadata::V6(_deprecated_metadata) - | RuntimeMetadata::V7(_deprecated_metadata) => { - Box::new(core::iter::empty()) as Box>> - } - RuntimeMetadata::V8(m) => Box::new(m.storage_entries()), - RuntimeMetadata::V9(m) => Box::new(m.storage_entries()), - RuntimeMetadata::V10(m) => Box::new(m.storage_entries()), - RuntimeMetadata::V11(m) => Box::new(m.storage_entries()), - RuntimeMetadata::V12(m) => Box::new(m.storage_entries()), - RuntimeMetadata::V13(m) => Box::new(m.storage_entries()), - RuntimeMetadata::V14(m) => Box::new(m.storage_entries()), - RuntimeMetadata::V15(m) => Box::new(m.storage_entries()), - RuntimeMetadata::V16(m) => Box::new(m.storage_entries()), - } -} diff --git a/historic/src/storage/storage_entry.rs b/historic/src/storage/storage_entry.rs deleted file mode 100644 index 
90aa0f6840..0000000000 --- a/historic/src/storage/storage_entry.rs +++ /dev/null @@ -1,48 +0,0 @@ -use super::storage_info::AnyStorageInfo; -use super::storage_key::StorageKey; -use super::storage_value::StorageValue; -use crate::error::StorageKeyError; -use std::borrow::Cow; -use std::sync::Arc; - -/// This represents a storage entry, which is a key-value pair in the storage. -pub struct StorageEntry<'atblock> { - key: Vec, - // This contains the storage information already: - value: StorageValue<'atblock>, -} - -impl<'atblock> StorageEntry<'atblock> { - /// Create a new storage entry. - pub fn new( - info: Arc>, - key: Vec, - value: Cow<'atblock, [u8]>, - ) -> Self { - Self { - key, - value: StorageValue::new(info, value), - } - } - - /// Get the raw bytes for this storage entry's key. - pub fn key_bytes(&self) -> &[u8] { - &self.key - } - - /// Consume this storage entry and return the raw bytes for the key and value. - pub fn into_key_and_value_bytes(self) -> (Vec, Vec) { - (self.key, self.value.into_bytes()) - } - - /// Decode the key for this storage entry. This gives back a type from which we can - /// decode specific parts of the key hash (where applicable). - pub fn key(&'_ self) -> Result, StorageKeyError> { - StorageKey::new(&self.value.info, &self.key) - } - - /// Return the storage value. 
- pub fn value(&self) -> &StorageValue<'atblock> { - &self.value - } -} diff --git a/historic/src/storage/storage_info.rs b/historic/src/storage/storage_info.rs deleted file mode 100644 index 685a0ff766..0000000000 --- a/historic/src/storage/storage_info.rs +++ /dev/null @@ -1,102 +0,0 @@ -use crate::error::StorageError; -use frame_decode::storage::StorageTypeInfo; -use frame_metadata::RuntimeMetadata; -use scale_info_legacy::{LookupName, TypeRegistrySet}; - -pub enum AnyStorageInfo<'atblock> { - Legacy(StorageInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>), - Current(StorageInfo<'atblock, u32, scale_info::PortableRegistry>), -} - -impl<'atblock> AnyStorageInfo<'atblock> { - /// For a slice of storage entries, return a vec of information about each one. - pub fn new( - pallet_name: &str, - entry_name: &str, - metadata: &'atblock RuntimeMetadata, - legacy_types: &'atblock TypeRegistrySet<'atblock>, - ) -> Result { - let info = match metadata { - RuntimeMetadata::V8(m) => storage_info_inner(pallet_name, entry_name, m, legacy_types), - RuntimeMetadata::V9(m) => storage_info_inner(pallet_name, entry_name, m, legacy_types), - RuntimeMetadata::V10(m) => storage_info_inner(pallet_name, entry_name, m, legacy_types), - RuntimeMetadata::V11(m) => storage_info_inner(pallet_name, entry_name, m, legacy_types), - RuntimeMetadata::V12(m) => storage_info_inner(pallet_name, entry_name, m, legacy_types), - RuntimeMetadata::V13(m) => storage_info_inner(pallet_name, entry_name, m, legacy_types), - RuntimeMetadata::V14(m) => storage_info_inner(pallet_name, entry_name, m, &m.types), - RuntimeMetadata::V15(m) => storage_info_inner(pallet_name, entry_name, m, &m.types), - RuntimeMetadata::V16(m) => storage_info_inner(pallet_name, entry_name, m, &m.types), - unknown => { - return Err(StorageError::UnsupportedMetadataVersion { - version: unknown.version(), - }); - } - }?; - - fn storage_info_inner<'atblock, Info, Resolver>( - pallet_name: &str, - entry_name: &str, - m: &'atblock 
Info, - type_resolver: &'atblock Resolver, - ) -> Result, StorageError> - where - Info: StorageTypeInfo, - Resolver: scale_type_resolver::TypeResolver, - AnyStorageInfo<'atblock>: From>, - { - m.storage_info(pallet_name, entry_name) - .map(|frame_storage_info| { - let info = StorageInfo { - info: frame_storage_info, - resolver: type_resolver, - }; - AnyStorageInfo::from(info) - }) - .map_err(|e| StorageError::ExtractStorageInfoError { - reason: e.into_owned(), - }) - } - - Ok(info) - } - - /// Is the storage entry a map (ie something we'd provide extra keys to access a value, or otherwise iterate over)? - pub fn is_map(&self) -> bool { - match self { - AnyStorageInfo::Legacy(info) => !info.info.keys.is_empty(), - AnyStorageInfo::Current(info) => !info.info.keys.is_empty(), - } - } -} - -impl<'atblock> From>> - for AnyStorageInfo<'atblock> -{ - fn from(info: StorageInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>) -> Self { - AnyStorageInfo::Legacy(info) - } -} -impl<'atblock> From> - for AnyStorageInfo<'atblock> -{ - fn from(info: StorageInfo<'atblock, u32, scale_info::PortableRegistry>) -> Self { - AnyStorageInfo::Current(info) - } -} - -pub struct StorageInfo<'atblock, TypeId: Clone, Resolver> { - pub info: frame_decode::storage::StorageInfo<'atblock, TypeId>, - pub resolver: &'atblock Resolver, -} - -macro_rules! 
with_info { - ($info:ident = $original_info:expr => $fn:expr) => {{ - #[allow(clippy::clone_on_copy)] - let info = match $original_info { - AnyStorageInfo::Legacy($info) => $fn, - AnyStorageInfo::Current($info) => $fn, - }; - info - }}; -} -pub(crate) use with_info; diff --git a/historic/src/storage/storage_key.rs b/historic/src/storage/storage_key.rs deleted file mode 100644 index cbabe0e6a0..0000000000 --- a/historic/src/storage/storage_key.rs +++ /dev/null @@ -1,176 +0,0 @@ -use super::AnyStorageInfo; -use crate::{error::StorageKeyError, storage::storage_info::with_info}; -use scale_info_legacy::{LookupName, TypeRegistrySet}; - -// This is part of our public interface. -pub use frame_decode::storage::{IntoDecodableValues, StorageHasher}; - -enum AnyStorageKeyInfo<'atblock> { - Legacy(StorageKeyInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>), - Current(StorageKeyInfo<'atblock, u32, scale_info::PortableRegistry>), -} - -impl<'atblock> From>> - for AnyStorageKeyInfo<'atblock> -{ - fn from(info: StorageKeyInfo<'atblock, LookupName, TypeRegistrySet<'atblock>>) -> Self { - AnyStorageKeyInfo::Legacy(info) - } -} -impl<'atblock> From> - for AnyStorageKeyInfo<'atblock> -{ - fn from(info: StorageKeyInfo<'atblock, u32, scale_info::PortableRegistry>) -> Self { - AnyStorageKeyInfo::Current(info) - } -} - -struct StorageKeyInfo<'atblock, TypeId, Resolver> { - info: frame_decode::storage::StorageKey, - resolver: &'atblock Resolver, -} - -macro_rules! with_key_info { - ($info:ident = $original_info:expr => $fn:expr) => {{ - #[allow(clippy::clone_on_copy)] - let info = match $original_info { - AnyStorageKeyInfo::Legacy($info) => $fn, - AnyStorageKeyInfo::Current($info) => $fn, - }; - info - }}; -} - -/// This represents the different parts of a storage key. 
-pub struct StorageKey<'entry, 'atblock> { - info: AnyStorageKeyInfo<'atblock>, - bytes: &'entry [u8], -} - -impl<'entry, 'atblock> StorageKey<'entry, 'atblock> { - pub(crate) fn new( - info: &AnyStorageInfo<'atblock>, - bytes: &'entry [u8], - ) -> Result { - with_info!(info = info => { - let cursor = &mut &*bytes; - let storage_key_info = frame_decode::storage::decode_storage_key_with_info( - cursor, - &info.info, - info.resolver, - ).map_err(|e| { - StorageKeyError::DecodeError { reason: e.map_type_id(|id| id.to_string()) } - })?; - - if !cursor.is_empty() { - return Err(StorageKeyError::LeftoverBytes { - leftover_bytes: cursor.to_vec(), - }); - } - - Ok(StorageKey { - info: StorageKeyInfo { - info: storage_key_info, - resolver: info.resolver, - }.into(), - bytes, - }) - }) - } - - /// Attempt to decode the values contained within this storage key to the `Target` type - /// provided. This type is typically a tuple of types which each implement [`scale_decode::DecodeAsType`] - /// and correspond to each of the key types present, in order. - pub fn decode_as(&self) -> Result { - with_key_info!(info = &self.info => { - let values = frame_decode::storage::decode_storage_key_values( - self.bytes, - &info.info, - info.resolver - ).map_err(|e| { - StorageKeyError::DecodeKeyValueError { reason: e } - })?; - - Ok(values) - }) - } - - /// Iterate over the parts of this storage key. Each part of a storage key corresponds to a - /// single value that has been hashed. - pub fn parts(&'_ self) -> impl ExactSizeIterator> { - let parts_len = with_key_info!(info = &self.info => info.info.parts().len()); - (0..parts_len).map(move |index| StorageKeyPart { - index, - info: &self.info, - bytes: self.bytes, - }) - } - - /// Return the part of the storage key at the provided index, or `None` if the index is out of bounds. 
- pub fn part(&self, index: usize) -> Option> { - if index < self.parts().len() { - Some(StorageKeyPart { - index, - info: &self.info, - bytes: self.bytes, - }) - } else { - None - } - } -} - -/// This represents a part of a storage key. -pub struct StorageKeyPart<'key, 'entry, 'atblock> { - index: usize, - info: &'key AnyStorageKeyInfo<'atblock>, - bytes: &'entry [u8], -} - -impl<'key, 'entry, 'atblock> StorageKeyPart<'key, 'entry, 'atblock> { - /// Get the raw bytes for this part of the storage key. - pub fn bytes(&self) -> &'entry [u8] { - with_key_info!(info = &self.info => { - let part = &info.info[self.index]; - let hash_range = part.hash_range(); - let value_range = part - .value() - .map(|v| v.range()) - .unwrap_or(std::ops::Range { start: hash_range.end, end: hash_range.end }); - let combined_range = std::ops::Range { - start: hash_range.start, - end: value_range.end, - }; - &self.bytes[combined_range] - }) - } - - /// Get the hasher that was used to construct this part of the storage key. - pub fn hasher(&self) -> StorageHasher { - with_key_info!(info = &self.info => info.info[self.index].hasher()) - } - - /// For keys that were produced using "concat" or "identity" hashers, the value - /// is available as a part of the key hash, allowing us to decode it into anything - /// implementing [`scale_decode::DecodeAsType`]. If the key was produced using a - /// different hasher, this will return `None`. 
- pub fn decode_as(&self) -> Result, StorageKeyError> { - with_key_info!(info = &self.info => { - let part_info = &info.info[self.index]; - let Some(value_info) = part_info.value() else { - return Ok(None); - }; - - let value_bytes = &self.bytes[value_info.range()]; - let value_ty = value_info.ty().clone(); - - let decoded_key_part = T::decode_as_type( - &mut &*value_bytes, - value_ty, - info.resolver, - ).map_err(|e| StorageKeyError::DecodePartError { index: self.index, reason: e })?; - - Ok(Some(decoded_key_part)) - }) - } -} diff --git a/historic/src/storage/storage_value.rs b/historic/src/storage/storage_value.rs deleted file mode 100644 index 933cf3a233..0000000000 --- a/historic/src/storage/storage_value.rs +++ /dev/null @@ -1,79 +0,0 @@ -use super::storage_info::AnyStorageInfo; -use super::storage_info::with_info; -use crate::error::StorageValueError; -use crate::utils::{AnyResolver, AnyTypeId}; -use scale_decode::DecodeAsType; -use std::borrow::Cow; -use std::sync::Arc; - -/// This represents a storage value. -pub struct StorageValue<'atblock> { - pub(crate) info: Arc>, - bytes: Cow<'atblock, [u8]>, - resolver: AnyResolver<'atblock, 'atblock>, -} - -impl<'atblock> StorageValue<'atblock> { - /// Create a new storage value. - pub(crate) fn new(info: Arc>, bytes: Cow<'atblock, [u8]>) -> Self { - let resolver = match &*info { - AnyStorageInfo::Current(info) => AnyResolver::A(info.resolver), - AnyStorageInfo::Legacy(info) => AnyResolver::B(info.resolver), - }; - - Self { - info, - bytes, - resolver, - } - } - - /// Get the raw bytes for this storage value. - pub fn bytes(&self) -> &[u8] { - &self.bytes - } - - /// Consume this storage value and return the raw bytes. - pub fn into_bytes(self) -> Vec { - self.bytes.to_vec() - } - - /// Visit the given field with a [`scale_decode::visitor::Visitor`]. 
This is like a lower level - /// version of [`StorageValue::decode_as`], as the visitor is able to preserve lifetimes - /// and has access to more type information than is available via [`StorageValue::decode_as`]. - pub fn visit< - V: scale_decode::visitor::Visitor>, - >( - &self, - visitor: V, - ) -> Result, V::Error> { - let type_id = match &*self.info { - AnyStorageInfo::Current(info) => AnyTypeId::A(info.info.value_id), - AnyStorageInfo::Legacy(info) => AnyTypeId::B(info.info.value_id.clone()), - }; - let cursor = &mut self.bytes(); - - scale_decode::visitor::decode_with_visitor(cursor, type_id, &self.resolver, visitor) - } - - /// Decode this storage value. - pub fn decode_as(&self) -> Result { - with_info!(info = &*self.info => { - let cursor = &mut &*self.bytes; - - let value = T::decode_as_type( - cursor, - info.info.value_id.clone(), - info.resolver, - ).map_err(|e| StorageValueError::DecodeError { reason: e })?; - - if !cursor.is_empty() { - return Err(StorageValueError::LeftoverBytes { - leftover_bytes: cursor.to_vec(), - }); - } - - Ok(value) - }) - } -} diff --git a/historic/src/utils.rs b/historic/src/utils.rs deleted file mode 100644 index 4a4edf859b..0000000000 --- a/historic/src/utils.rs +++ /dev/null @@ -1,7 +0,0 @@ -mod any_resolver; -mod either; -mod range_map; - -pub use any_resolver::{AnyResolver, AnyResolverError, AnyTypeId}; -pub use either::Either; -pub use range_map::RangeMap; diff --git a/historic/src/utils/any_resolver.rs b/historic/src/utils/any_resolver.rs deleted file mode 100644 index c65b6a1b46..0000000000 --- a/historic/src/utils/any_resolver.rs +++ /dev/null @@ -1,186 +0,0 @@ -use super::Either; -use scale_info_legacy::LookupName; -use scale_type_resolver::ResolvedTypeVisitor; - -/// A type resolver which could either be for modern or historic resolving. -pub type AnyResolver<'a, 'b> = - Either<&'a scale_info::PortableRegistry, &'a scale_info_legacy::TypeRegistrySet<'b>>; - -/// A type ID which is either a modern or historic ID. 
-pub type AnyTypeId = Either; - -impl Default for AnyTypeId { - fn default() -> Self { - // Not a sensible default, but we don't need / can't provide a sensible one. - AnyTypeId::A(u32::MAX) - } -} -impl From for AnyTypeId { - fn from(value: u32) -> Self { - AnyTypeId::A(value) - } -} -impl From for AnyTypeId { - fn from(value: LookupName) -> Self { - AnyTypeId::B(value) - } -} -impl TryFrom for u32 { - type Error = (); - fn try_from(value: AnyTypeId) -> Result { - match value { - AnyTypeId::A(v) => Ok(v), - AnyTypeId::B(_) => Err(()), - } - } -} -impl TryFrom for LookupName { - type Error = (); - fn try_from(value: AnyTypeId) -> Result { - match value { - AnyTypeId::A(_) => Err(()), - AnyTypeId::B(v) => Ok(v), - } - } -} - -/// A resolve error that comes from using [`AnyResolver`] to resolve some [`AnyTypeId`] into a type. -#[derive(Debug, thiserror::Error)] -pub enum AnyResolverError { - #[error("got a {got} type ID but expected a {expected} type ID")] - TypeIdMismatch { - got: &'static str, - expected: &'static str, - }, - #[error("{0}")] - ScaleInfo(scale_type_resolver::portable_registry::Error), - #[error("{0}")] - ScaleInfoLegacy(scale_info_legacy::type_registry::TypeRegistryResolveError), -} - -impl<'a, 'b> scale_type_resolver::TypeResolver for AnyResolver<'a, 'b> { - type TypeId = AnyTypeId; - type Error = AnyResolverError; - - fn resolve_type<'this, V: ResolvedTypeVisitor<'this, TypeId = Self::TypeId>>( - &'this self, - type_id: Self::TypeId, - visitor: V, - ) -> Result { - match (self, type_id) { - (Either::A(resolver), Either::A(id)) => resolver - .resolve_type(id, ModernVisitor(visitor)) - .map_err(AnyResolverError::ScaleInfo), - (Either::B(resolver), Either::B(id)) => resolver - .resolve_type(id, LegacyVisitor(visitor)) - .map_err(AnyResolverError::ScaleInfoLegacy), - (Either::A(_), Either::B(_)) => Err(AnyResolverError::TypeIdMismatch { - got: "LookupName", - expected: "u32", - }), - (Either::B(_), Either::A(_)) => Err(AnyResolverError::TypeIdMismatch 
{ - got: "u32", - expected: "LookupName", - }), - } - } -} - -// We need to have a visitor which understands only modern or legacy types, and can wrap the more generic visitor -// that must be provided to AnyResolver::resolve_type. This then allows us to visit historic _or_ modern types -// using the single visitor provided by the user. -struct LegacyVisitor(V); -struct ModernVisitor(V); - -mod impls { - use super::{AnyTypeId, LegacyVisitor, LookupName, ModernVisitor}; - use scale_type_resolver::*; - - // An ugly implementation which maps from modern or legacy types into our AnyTypeId, - // to make LegacyVisitor and ModernVisitor valid visitors when wrapping a generic "any" visitor. - macro_rules! impl_visitor_mapper { - ($struc:ident, $type_id_ty:ident, $variant:ident) => { - impl<'this, V> ResolvedTypeVisitor<'this> for $struc - where - V: ResolvedTypeVisitor<'this, TypeId = AnyTypeId>, - { - type TypeId = $type_id_ty; - type Value = V::Value; - - fn visit_unhandled(self, kind: UnhandledKind) -> Self::Value { - self.0.visit_unhandled(kind) - } - fn visit_array(self, type_id: Self::TypeId, len: usize) -> Self::Value { - self.0.visit_array(AnyTypeId::$variant(type_id), len) - } - fn visit_not_found(self) -> Self::Value { - self.0.visit_not_found() - } - fn visit_composite(self, path: Path, fields: Fields) -> Self::Value - where - Path: PathIter<'this>, - Fields: FieldIter<'this, Self::TypeId>, - { - self.0.visit_composite( - path, - fields.map(|field| Field { - name: field.name, - id: AnyTypeId::$variant(field.id), - }), - ) - } - fn visit_variant(self, path: Path, variants: Var) -> Self::Value - where - Path: PathIter<'this>, - Fields: FieldIter<'this, Self::TypeId>, - Var: VariantIter<'this, Fields>, - { - self.0.visit_variant( - path, - variants.map(|variant| Variant { - index: variant.index, - name: variant.name, - fields: variant.fields.map(|field| Field { - name: field.name, - id: AnyTypeId::$variant(field.id), - }), - }), - ) - } - fn visit_sequence(self, 
path: Path, type_id: Self::TypeId) -> Self::Value - where - Path: PathIter<'this>, - { - self.0.visit_sequence(path, AnyTypeId::$variant(type_id)) - } - - fn visit_tuple(self, type_ids: TypeIds) -> Self::Value - where - TypeIds: ExactSizeIterator, - { - self.0 - .visit_tuple(type_ids.map(|id| AnyTypeId::$variant(id))) - } - - fn visit_primitive(self, primitive: Primitive) -> Self::Value { - self.0.visit_primitive(primitive) - } - - fn visit_compact(self, type_id: Self::TypeId) -> Self::Value { - self.0.visit_compact(AnyTypeId::$variant(type_id)) - } - - fn visit_bit_sequence( - self, - store_format: BitsStoreFormat, - order_format: BitsOrderFormat, - ) -> Self::Value { - self.0.visit_bit_sequence(store_format, order_format) - } - } - }; - } - - impl_visitor_mapper!(ModernVisitor, u32, A); - impl_visitor_mapper!(LegacyVisitor, LookupName, B); -} diff --git a/historic/src/utils/either.rs b/historic/src/utils/either.rs deleted file mode 100644 index 081b52e92e..0000000000 --- a/historic/src/utils/either.rs +++ /dev/null @@ -1,49 +0,0 @@ -macro_rules! either { - ($name:ident( $fst:ident, $($variant:ident),* )) => { - #[derive(Clone, Copy, Debug)] - pub enum $name<$fst, $($variant),*> { - $fst($fst), - $($variant($variant),)* - } - - impl<$fst, $($variant),*> Iterator for $name<$fst, $($variant),*> - where - $fst: Iterator, - $($variant: Iterator,)* - { - type Item = $fst::Item; - - fn next(&mut self) -> Option { - match self { - $name::$fst(inner) => inner.next(), - $( $name::$variant(inner) => inner.next(), )* - } - } - } - - impl <$fst, $($variant),*> futures::stream::Stream for $name<$fst, $($variant),*> - where - $fst: futures::stream::Stream, - $($variant: futures::stream::Stream,)* - { - type Item = $fst::Item; - - fn poll_next( - self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - use std::pin::Pin; - - // SAFETY: This is safe because we never move the inner value out of the Pin. 
- unsafe { - match self.get_unchecked_mut() { - $name::$fst(inner) => Pin::new_unchecked(inner).poll_next(cx), - $( $name::$variant(inner) => Pin::new_unchecked(inner).poll_next(cx), )* - } - } - } - } - } -} - -either!(Either(A, B)); diff --git a/historic/src/utils/range_map.rs b/historic/src/utils/range_map.rs deleted file mode 100644 index 269a2c5bcd..0000000000 --- a/historic/src/utils/range_map.rs +++ /dev/null @@ -1,154 +0,0 @@ -use std::fmt::Display; - -/// A map that associates ranges of keys with values. -#[derive(Debug, Clone)] -pub struct RangeMap { - // (range_start, range_ended, value). This is - // guaranteed to be sorted and have non-overlapping ranges. - mapping: Vec<(K, K, V)>, -} - -impl RangeMap { - /// Build an empty [`RangeMap`] as a placeholder. - pub fn empty() -> Self { - RangeMap { - mapping: Vec::new(), - } - } - - /// Build a [`RangeMap`]. - pub fn builder() -> RangeMapBuilder { - RangeMapBuilder { - mapping: Vec::new(), - } - } - - /// Return the value whose key is within the range, or None if not found. - pub fn get(&self, key: K) -> Option<&V> { - let idx = self - .mapping - .binary_search_by_key(&key, |&(start, end, _)| { - if key >= start && key < end { - key - } else { - start - } - }) - .ok()?; - - self.mapping.get(idx).map(|(_, _, val)| val) - } -} - -/// A builder for constructing a [`RangeMap`]. Use [``RangeMap::builder()`] to create one. -#[derive(Debug, Clone)] -pub struct RangeMapBuilder { - mapping: Vec<(K, K, V)>, -} - -impl RangeMapBuilder { - /// Try to add a range, mapping block numbers to a spec version. - /// - /// Returns an error if the range is empty or overlaps with an existing range. 
- pub fn try_add_range( - &mut self, - start: K, - end: K, - val: V, - ) -> Result<&mut Self, RangeMapError> { - let (start, end) = if start < end { - (start, end) - } else { - (end, start) - }; - - if start == end { - return Err(RangeMapError::EmptyRange(start)); - } - - if let Some(&(s, e, _)) = self.mapping.iter().find(|&&(s, e, _)| start < e && end > s) { - return Err(RangeMapError::OverlappingRanges { - proposed: (start, end), - existing: (s, e), - }); - } - - self.mapping.push((start, end, val)); - Ok(self) - } - - /// Add a range of blocks with the given spec version. - /// - /// # Panics - /// - /// This method will panic if the range is empty or overlaps with an existing range. - pub fn add_range(mut self, start: K, end: K, val: V) -> Self { - if let Err(e) = self.try_add_range(start, end, val) { - panic!("{e}") - } - self - } - - /// Finish adding ranges and build the [`RangeMap`]. - pub fn build(mut self) -> RangeMap { - self.mapping.sort_by_key(|&(start, _, _)| start); - RangeMap { - mapping: self.mapping, - } - } -} - -/// An error that can occur when calling [`RangeMapBuilder::try_add_range()`]. -#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)] -pub enum RangeMapError { - /// An error indicating that the proposed block range is empty. - #[error("Block range cannot be empty: start and end values must be different, but got {} for both", .0)] - EmptyRange(K), - /// An error indicating that the proposed block range overlaps with an existing one. 
- #[error("Overlapping block ranges are not allowed: proposed range is {}..{}, but we already have {}..{}", proposed.0, proposed.1, existing.0, existing.1)] - OverlappingRanges { proposed: (K, K), existing: (K, K) }, -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn test_rangemap_get() { - let spec_version = RangeMap::builder() - .add_range(0, 100, 1) - .add_range(100, 200, 2) - .add_range(200, 300, 3) - .build(); - - assert_eq!(spec_version.get(0), Some(&1)); - assert_eq!(spec_version.get(50), Some(&1)); - assert_eq!(spec_version.get(100), Some(&2)); - assert_eq!(spec_version.get(150), Some(&2)); - assert_eq!(spec_version.get(200), Some(&3)); - assert_eq!(spec_version.get(250), Some(&3)); - assert_eq!(spec_version.get(300), None); - } - - #[test] - fn test_rangemap_set() { - let mut spec_version = RangeMap::builder() - .add_range(0, 100, 1) - .add_range(200, 300, 3); - - assert_eq!( - spec_version.try_add_range(99, 130, 2).unwrap_err(), - RangeMapError::OverlappingRanges { - proposed: (99, 130), - existing: (0, 100), - } - ); - assert_eq!( - spec_version.try_add_range(170, 201, 2).unwrap_err(), - RangeMapError::OverlappingRanges { - proposed: (170, 201), - existing: (200, 300), - } - ); - } -} diff --git a/lightclient/src/lib.rs b/lightclient/src/lib.rs index 9f6cb1aace..15013090ef 100644 --- a/lightclient/src/lib.rs +++ b/lightclient/src/lib.rs @@ -6,7 +6,6 @@ //! to Substrate based chains. 
#![deny(missing_docs)] -#![cfg_attr(docsrs, feature(doc_cfg))] #[cfg(any( all(feature = "web", feature = "native"), diff --git a/new/Cargo.toml b/new/Cargo.toml deleted file mode 100644 index 97c558f665..0000000000 --- a/new/Cargo.toml +++ /dev/null @@ -1,175 +0,0 @@ -[package] -name = "subxt-new" -version.workspace = true -authors.workspace = true -edition.workspace = true -rust-version.workspace = true -publish = true - -license.workspace = true -readme = "../README.md" -repository.workspace = true -documentation.workspace = true -homepage.workspace = true -description = "Submit extrinsics (transactions) to a substrate node via RPC" -keywords = ["parity", "substrate", "blockchain"] - -[lints] -workspace = true - -[features] -# For dev and documentation reasons we enable more features than are often desired. -# it's recommended to use `--no-default-features` and then select what you need. -default = ["jsonrpsee", "native"] - -# Features that we expect to be enabled for documentation. -docs = [ - "default", - "unstable-light-client", - "runtime", - "reconnecting-rpc-client", -] - -# Enable this for native (ie non web/wasm builds). -# Exactly 1 of "web" and "native" is expected. -native = [ - "subxt-lightclient?/native", - "subxt-rpcs/native", - "tokio-util", - "tokio?/sync", - "sp-crypto-hashing/std", -] - -# Enable this for web/wasm builds. -# Exactly 1 of "web" and "native" is expected. -web = [ - "subxt-lightclient?/web", - "subxt-macro/web", - "subxt-rpcs/web", - "tokio?/sync", -] - -# Feature flag to enable the default future executor. -# Technically it's a hack enable to both but simplifies the conditional compilation -# and subxt is selecting executor based on the used platform. -# -# For instance `wasm-bindgen-futures` panics if the platform isn't wasm32 and -# similar for tokio that requires a tokio runtime to be initialized. 
-runtime = ["tokio/rt", "wasm-bindgen-futures"] - -# Enable this to use the reconnecting rpc client -reconnecting-rpc-client = ["subxt-rpcs/reconnecting-rpc-client"] - -# Enable this to use jsonrpsee, which enables the jsonrpsee RPC client, and -# a couple of util functions which rely on jsonrpsee. -jsonrpsee = [ - "dep:jsonrpsee", - "subxt-rpcs/jsonrpsee", - "runtime" -] - -# Enable this to fetch and utilize the latest unstable metadata from a node. -# The unstable metadata is subject to breaking changes and the subxt might -# fail to decode the metadata properly. Use this to experiment with the -# latest features exposed by the metadata. -unstable-metadata = [] - -# Activate this to expose the Light Client functionality. -# Note that this feature is experimental and things may break or not work as expected. -unstable-light-client = ["subxt-lightclient", "subxt-rpcs/unstable-light-client"] - -# Activate this to expose the ability to generate metadata from Wasm runtime files. -runtime-wasm-path = ["subxt-macro/runtime-wasm-path"] - -[dependencies] -async-trait = { workspace = true } -base58 = { workspace = true } -blake2 = { workspace = true } -codec = { package = "parity-scale-codec", workspace = true, features = ["derive"] } -derive-where = { workspace = true } -scale-info = { workspace = true, features = ["default"] } -scale-info-legacy = { workspace = true } -scale-value = { workspace = true, features = ["default"] } -scale-bits = { workspace = true, features = ["default"] } -scale-decode = { workspace = true, features = ["default"] } -scale-encode = { workspace = true, features = ["default"] } -futures = { workspace = true } -hex = { workspace = true } -impl-serde = { workspace = true, default-features = false } -keccak-hash = { workspace = true } -serde = { workspace = true, features = ["derive"] } -serde_json = { workspace = true, features = ["default", "raw_value"] } -sp-crypto-hashing = { workspace = true } -thiserror = { workspace = true } -tracing = { 
workspace = true } -frame-metadata = { workspace = true } -frame-decode = { workspace = true, features = ["legacy-types"] } -either = { workspace = true } -web-time = { workspace = true } - -# Provides some deserialization, types like U256/H256 and hashing impls like twox/blake256: -primitive-types = { workspace = true, features = ["codec", "scale-info", "serde"] } - -# Included if the "jsonrpsee" feature is enabled. -jsonrpsee = { workspace = true, optional = true, features = ["jsonrpsee-types"] } - -# Other subxt crates we depend on. -subxt-macro = { workspace = true } -subxt-metadata = { workspace = true, features = ["std", "legacy"] } -subxt-lightclient = { workspace = true, optional = true, default-features = false } -subxt-rpcs = { workspace = true } - -# For parsing urls to disallow insecure schemes -url = { workspace = true } - -# Included if "native" feature is enabled -tokio-util = { workspace = true, features = ["compat"], optional = true } - -# Included if the reconnecting rpc client feature is enabled -# Only the `tokio/sync` is used in the reconnecting rpc client -# and that compiles both for native and web. -tokio = { workspace = true, optional = true } -wasm-bindgen-futures = { workspace = true, optional = true } - -[dev-dependencies] -bitvec = { workspace = true } -codec = { workspace = true, features = ["derive", "bit-vec"] } -scale-info = { workspace = true, features = ["bit-vec"] } -tokio = { workspace = true, features = ["macros", "time", "rt-multi-thread", "sync"] } -sp-core = { workspace = true, features = ["std"] } -sp-keyring = { workspace = true, features = ["std"] } -sp-runtime = { workspace = true, features = ["std"] } -assert_matches = { workspace = true } -subxt-signer = { path = "../signer", features = ["unstable-eth"] } -subxt-rpcs = { workspace = true, features = ["subxt", "mock-rpc-client"] } -# Tracing subscriber is useful for light-client examples to ensure that -# the `bootNodes` and chain spec are configured correctly. 
If all is fine, then -# the light-client will emit INFO logs with -# `GrandPa warp sync finished` and `Finalized block runtime ready.` -tracing-subscriber = { workspace = true } -# These deps are needed to test the reconnecting rpc client -jsonrpsee = { workspace = true, features = ["server"] } -tower = { workspace = true } -hyper = { workspace = true } -http-body = { workspace = true } - -[[example]] -name = "light_client_basic" -path = "examples/light_client_basic.rs" -required-features = ["unstable-light-client", "jsonrpsee"] - -[[example]] -name = "light_client_local_node" -path = "examples/light_client_local_node.rs" -required-features = ["unstable-light-client", "jsonrpsee", "native"] - -[[example]] -name = "setup_reconnecting_rpc_client" -path = "examples/setup_reconnecting_rpc_client.rs" -required-features = ["reconnecting-rpc-client"] - -[package.metadata.docs.rs] -features = ["docs"] - -[package.metadata.playground] -features = ["default", "unstable-light-client"] diff --git a/new/examples/block_decoding_dynamic.rs b/new/examples/block_decoding_dynamic.rs deleted file mode 100644 index 44ba483221..0000000000 --- a/new/examples/block_decoding_dynamic.rs +++ /dev/null @@ -1,43 +0,0 @@ -#![allow(missing_docs)] -use subxt::{OnlineClient, PolkadotConfig}; - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a client that subscribes to blocks of the Polkadot network. 
- let api = OnlineClient::::from_url("wss://rpc.polkadot.io:443").await?; - - // Subscribe to all finalized blocks: - let mut blocks_sub = api.blocks().subscribe_finalized().await?; - while let Some(block) = blocks_sub.next().await { - let block = block?; - let block_number = block.header().number; - let block_hash = block.hash(); - println!("Block #{block_number} ({block_hash})"); - - // Decode each signed extrinsic in the block dynamically - let extrinsics = block.extrinsics().await?; - for ext in extrinsics.iter() { - let Some(transaction_extensions) = ext.transaction_extensions() else { - continue; // we do not look at inherents in this example - }; - - // Decode the fields into our dynamic Value type to display: - let fields = ext.decode_as_fields::()?; - - println!(" {}/{}", ext.pallet_name(), ext.call_name()); - println!(" Transaction Extensions:"); - for signed_ext in transaction_extensions.iter() { - // We only want to take a look at these 3 signed extensions, because the others all just have unit fields. - if ["CheckMortality", "CheckNonce", "ChargeTransactionPayment"] - .contains(&signed_ext.name()) - { - println!(" {}: {}", signed_ext.name(), signed_ext.value()?); - } - } - println!(" Fields:"); - println!(" {fields}\n"); - } - } - - Ok(()) -} diff --git a/new/examples/block_decoding_static.rs b/new/examples/block_decoding_static.rs deleted file mode 100644 index 9af696bab4..0000000000 --- a/new/examples/block_decoding_static.rs +++ /dev/null @@ -1,64 +0,0 @@ -#![allow(missing_docs)] -use subxt::{ - OnlineClient, PolkadotConfig, - utils::{AccountId32, MultiAddress}, -}; - -use codec::Decode; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -use polkadot::balances::calls::types::TransferKeepAlive; - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a client that subscribes to blocks of the Polkadot network. 
- let api = OnlineClient::::from_url("wss://rpc.polkadot.io:443").await?; - - // Subscribe to all finalized blocks: - let mut blocks_sub = api.blocks().subscribe_finalized().await?; - - // For each block, print details about the `TransferKeepAlive` transactions we are interested in. - while let Some(block) = blocks_sub.next().await { - let block = block?; - let block_number = block.header().number; - let block_hash = block.hash(); - println!("Block #{block_number} ({block_hash}):"); - - let extrinsics = block.extrinsics().await?; - for transfer in extrinsics.find::() { - let transfer = transfer?; - - let Some(extensions) = transfer.details.transaction_extensions() else { - panic!("TransferKeepAlive should be signed") - }; - - let addr_bytes = transfer - .details - .address_bytes() - .expect("TransferKeepAlive should be signed"); - let sender = MultiAddress::::decode(&mut &addr_bytes[..]) - .expect("Decoding should work"); - let sender = display_address(&sender); - let receiver = display_address(&transfer.value.dest); - let value = transfer.value.value; - let tip = extensions.tip().expect("Should have tip"); - let nonce = extensions.nonce().expect("Should have nonce"); - - println!( - " Transfer of {value} DOT:\n {sender} (Tip: {tip}, Nonce: {nonce}) ---> {receiver}", - ); - } - } - - Ok(()) -} - -fn display_address(addr: &MultiAddress) -> String { - if let MultiAddress::Id(id32) = addr { - format!("{id32}") - } else { - "MultiAddress::...".into() - } -} diff --git a/new/examples/blocks_subscribing.rs b/new/examples/blocks_subscribing.rs deleted file mode 100644 index f0f0a37d43..0000000000 --- a/new/examples/blocks_subscribing.rs +++ /dev/null @@ -1,63 +0,0 @@ -#![allow(missing_docs)] -use subxt::{OnlineClient, PolkadotConfig}; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a client to use: - let api = OnlineClient::::new().await?; - - 
// Subscribe to all finalized blocks: - let mut blocks_sub = api.blocks().subscribe_finalized().await?; - - // For each block, print a bunch of information about it: - while let Some(block) = blocks_sub.next().await { - let block = block?; - - let block_number = block.header().number; - let block_hash = block.hash(); - - println!("Block #{block_number}:"); - println!(" Hash: {block_hash}"); - println!(" Extrinsics:"); - - // Log each of the extrinsic with it's associated events: - let extrinsics = block.extrinsics().await?; - for ext in extrinsics.iter() { - let idx = ext.index(); - let events = ext.events().await?; - let bytes_hex = format!("0x{}", hex::encode(ext.bytes())); - - // See the API docs for more ways to decode extrinsics: - let decoded_ext = ext.as_root_extrinsic::(); - - println!(" Extrinsic #{idx}:"); - println!(" Bytes: {bytes_hex}"); - println!(" Decoded: {decoded_ext:?}"); - - println!(" Events:"); - for evt in events.iter() { - let evt = evt?; - let pallet_name = evt.pallet_name(); - let event_name = evt.variant_name(); - let event_values = evt.decode_as_fields::()?; - - println!(" {pallet_name}_{event_name}"); - println!(" {event_values}"); - } - - println!(" Transaction Extensions:"); - if let Some(transaction_extensions) = ext.transaction_extensions() { - for transaction_extension in transaction_extensions.iter() { - let name = transaction_extension.name(); - let value = transaction_extension.value()?.to_string(); - println!(" {name}: {value}"); - } - } - } - } - - Ok(()) -} diff --git a/new/examples/constants_dynamic.rs b/new/examples/constants_dynamic.rs deleted file mode 100644 index 2d4ed4c5d7..0000000000 --- a/new/examples/constants_dynamic.rs +++ /dev/null @@ -1,26 +0,0 @@ -#![allow(missing_docs)] -use subxt::dynamic::Value; -use subxt::{OnlineClient, PolkadotConfig}; - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a client to use: - let api = OnlineClient::::new().await?; - - // We can query a constant by providing a 
tuple of the pallet and constant name. The return type - // will be `Value` if we pass this query: - let constant_query = ("System", "BlockLength"); - let _value = api.constants().at(&constant_query)?; - - // Or we can use the library function to query a constant, which allows us to pass a generic type - // that Subxt will attempt to decode the constant into: - let constant_query = subxt::dynamic::constant::("System", "BlockLength"); - let value = api.constants().at(&constant_query)?; - - // Or we can obtain the bytes for the constant, using either form of query. - let bytes = api.constants().bytes_at(&constant_query)?; - - println!("Constant bytes: {:?}", bytes); - println!("Constant value: {}", value); - Ok(()) -} diff --git a/new/examples/constants_static.rs b/new/examples/constants_static.rs deleted file mode 100644 index 2bb1aecbf6..0000000000 --- a/new/examples/constants_static.rs +++ /dev/null @@ -1,24 +0,0 @@ -#![allow(missing_docs)] -use subxt::{OnlineClient, PolkadotConfig}; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a client to use: - let api = OnlineClient::::new().await?; - - // A query to obtain some constant: - let constant_query = polkadot::constants().system().block_length(); - - // Obtain the value: - let value = api.constants().at(&constant_query)?; - - // Or obtain the bytes: - let bytes = api.constants().bytes_at(&constant_query)?; - - println!("Encoded block length: {bytes:?}"); - println!("Block length: {value:?}"); - Ok(()) -} diff --git a/new/examples/events.rs b/new/examples/events.rs deleted file mode 100644 index 9861c9238e..0000000000 --- a/new/examples/events.rs +++ /dev/null @@ -1,48 +0,0 @@ -#![allow(missing_docs)] -use subxt::{OnlineClient, PolkadotConfig}; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() 
-> Result<(), Box> { - // Create a client to use: - let api = OnlineClient::::new().await?; - - // Get events for the latest block: - let events = api.events().at_latest().await?; - - // We can dynamically decode events: - println!("Dynamic event details:"); - for event in events.iter() { - let event = event?; - - let pallet = event.pallet_name(); - let variant = event.variant_name(); - let field_values = event.decode_as_fields::()?; - - println!("{pallet}::{variant}: {field_values}"); - } - - // Or we can attempt to statically decode them into the root Event type: - println!("Static event details:"); - for event in events.iter() { - let event = event?; - - if let Ok(ev) = event.as_root_event::() { - println!("{ev:?}"); - } else { - println!(""); - } - } - - // Or we can look for specific events which match our statically defined ones: - let transfer_event = events.find_first::()?; - if let Some(ev) = transfer_event { - println!(" - Balance transfer success: value: {:?}", ev.amount); - } else { - println!(" - No balance transfer event found in this block"); - } - - Ok(()) -} diff --git a/new/examples/light_client_basic.rs b/new/examples/light_client_basic.rs deleted file mode 100644 index 397de2a255..0000000000 --- a/new/examples/light_client_basic.rs +++ /dev/null @@ -1,47 +0,0 @@ -#![allow(missing_docs)] -use futures::StreamExt; -use subxt::{PolkadotConfig, client::OnlineClient, lightclient::LightClient}; - -// Generate an interface that we can use from the node's metadata. 
-#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -const POLKADOT_SPEC: &str = include_str!("../../artifacts/demo_chain_specs/polkadot.json"); -const ASSET_HUB_SPEC: &str = - include_str!("../../artifacts/demo_chain_specs/polkadot_asset_hub.json"); - -#[tokio::main] -async fn main() -> Result<(), Box> { - // The lightclient logs are informative: - tracing_subscriber::fmt::init(); - - // Instantiate a light client with the Polkadot relay chain, - // and connect it to Asset Hub, too. - let (lightclient, polkadot_rpc) = LightClient::relay_chain(POLKADOT_SPEC)?; - let asset_hub_rpc = lightclient.parachain(ASSET_HUB_SPEC)?; - - // Create Subxt clients from these Smoldot backed RPC clients. - let polkadot_api = OnlineClient::::from_rpc_client(polkadot_rpc).await?; - let asset_hub_api = OnlineClient::::from_rpc_client(asset_hub_rpc).await?; - - // Use them! - let polkadot_sub = polkadot_api - .blocks() - .subscribe_finalized() - .await? - .map(|block| ("Polkadot", block)); - let parachain_sub = asset_hub_api - .blocks() - .subscribe_finalized() - .await? - .map(|block| ("AssetHub", block)); - - let mut stream_combinator = futures::stream::select(polkadot_sub, parachain_sub); - - while let Some((chain, block)) = stream_combinator.next().await { - let block = block?; - println!(" Chain {:?} hash={:?}", chain, block.hash()); - } - - Ok(()) -} diff --git a/new/examples/light_client_local_node.rs b/new/examples/light_client_local_node.rs deleted file mode 100644 index 68012b8551..0000000000 --- a/new/examples/light_client_local_node.rs +++ /dev/null @@ -1,58 +0,0 @@ -#![allow(missing_docs)] -use subxt::utils::fetch_chainspec_from_rpc_node; -use subxt::{ - PolkadotConfig, - client::OnlineClient, - lightclient::{ChainConfig, LightClient}, -}; -use subxt_signer::sr25519::dev; - -// Generate an interface that we can use from the node's metadata. 
-#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // The smoldot logs are informative: - tracing_subscriber::fmt::init(); - - // Use a utility function to obtain a chain spec from a locally running node: - let chain_spec = fetch_chainspec_from_rpc_node("ws://127.0.0.1:9944").await?; - - // Configure the bootnodes of this chain spec. In this case, because we start one - // single node, the bootnodes must be overwritten for the light client to connect - // to the local node. - // - // The `12D3KooWEyoppNCUx8Yx66oV9fJnriXwCcXwDDUA2kj6vnc6iDEp` is the P2P address - // from a local polkadot node starting with - // `--node-key 0000000000000000000000000000000000000000000000000000000000000001` - let chain_config = ChainConfig::chain_spec(chain_spec.get()).set_bootnodes([ - "/ip4/127.0.0.1/tcp/30333/p2p/12D3KooWEyoppNCUx8Yx66oV9fJnriXwCcXwDDUA2kj6vnc6iDEp", - ])?; - - // Start the light client up, establishing a connection to the local node. - let (_light_client, chain_rpc) = LightClient::relay_chain(chain_config)?; - let api = OnlineClient::::from_rpc_client(chain_rpc).await?; - - // Build a balance transfer extrinsic. - let dest = dev::bob().public_key().into(); - let balance_transfer_tx = polkadot::tx().balances().transfer_allow_death(dest, 10_000); - - // Submit the balance transfer extrinsic from Alice, and wait for it to be successful - // and in a finalized block. We get back the extrinsic events if all is well. - let from = dev::alice(); - let events = api - .tx() - .sign_and_submit_then_watch_default(&balance_transfer_tx, &from) - .await? - .wait_for_finalized_success() - .await?; - - // Find a Transfer event and print it. 
- let transfer_event = events.find_first::()?; - if let Some(event) = transfer_event { - println!("Balance transfer success: {event:?}"); - } - - Ok(()) -} diff --git a/new/examples/rpc_legacy.rs b/new/examples/rpc_legacy.rs deleted file mode 100644 index a21afd8f97..0000000000 --- a/new/examples/rpc_legacy.rs +++ /dev/null @@ -1,61 +0,0 @@ -#![allow(missing_docs)] -use subxt::backend::{legacy::LegacyRpcMethods, rpc::RpcClient}; -use subxt::config::DefaultExtrinsicParamsBuilder as Params; -use subxt::{OnlineClient, PolkadotConfig}; -use subxt_signer::sr25519::dev; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // First, create a raw RPC client: - let rpc_client = RpcClient::from_url("ws://127.0.0.1:9944").await?; - - // Use this to construct our RPC methods: - let rpc = LegacyRpcMethods::::new(rpc_client.clone()); - - // We can use the same client to drive our full Subxt interface too: - let api = OnlineClient::::from_rpc_client(rpc_client.clone()).await?; - - // Now, we can make some RPC calls using some legacy RPC methods. - println!( - "📛 System Name: {:?}\n🩺 Health: {:?}\n🖫 Properties: {:?}\n🔗 Chain: {:?}\n", - rpc.system_name().await?, - rpc.system_health().await?, - rpc.system_properties().await?, - rpc.system_chain().await? 
- ); - - // We can also interleave RPC calls and using the full Subxt client, here to submit multiple - // transactions using the legacy `system_account_next_index` RPC call, which returns a nonce - // that is adjusted for any transactions already in the pool: - - let alice = dev::alice(); - let bob = dev::bob(); - - loop { - let current_nonce = rpc - .system_account_next_index(&alice.public_key().into()) - .await?; - - let ext_params = Params::new().mortal(8).nonce(current_nonce).build(); - - let balance_transfer = polkadot::tx() - .balances() - .transfer_allow_death(bob.public_key().into(), 1_000_000); - - let ext_hash = api - .tx() - .create_partial_offline(&balance_transfer, ext_params)? - .sign(&alice) - .submit() - .await?; - - println!("Submitted ext {ext_hash} with nonce {current_nonce}"); - - // Sleep less than block time, but long enough to ensure - // not all transactions end up in the same block. - tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; - } -} diff --git a/new/examples/runtime_apis_dynamic.rs b/new/examples/runtime_apis_dynamic.rs deleted file mode 100644 index ef9c4ac071..0000000000 --- a/new/examples/runtime_apis_dynamic.rs +++ /dev/null @@ -1,30 +0,0 @@ -#![allow(missing_docs)] -use subxt::utils::AccountId32; -use subxt::{OnlineClient, config::PolkadotConfig}; -use subxt_signer::sr25519::dev; - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a client to use: - let api = OnlineClient::::new().await?; - - // Create a "dynamic" runtime API payload that calls the - // `AccountNonceApi_account_nonce` function. We could use the - // `scale_value::Value` type as output, and a vec of those as inputs, - // but since we know the input + return types we can pass them directly. - // There is one input argument, so the inputs are a tuple of one element. 
- let account: AccountId32 = dev::alice().public_key().into(); - let runtime_api_call = - subxt::dynamic::runtime_api_call::<_, u64>("AccountNonceApi", "account_nonce", (account,)); - - // Submit the call to get back a result. - let nonce = api - .runtime_api() - .at_latest() - .await? - .call(runtime_api_call) - .await?; - - println!("Account nonce: {:#?}", nonce); - Ok(()) -} diff --git a/new/examples/runtime_apis_raw.rs b/new/examples/runtime_apis_raw.rs deleted file mode 100644 index 45b5eecc50..0000000000 --- a/new/examples/runtime_apis_raw.rs +++ /dev/null @@ -1,23 +0,0 @@ -#![allow(missing_docs)] -use subxt::ext::codec::{Compact, Decode}; -use subxt::ext::frame_metadata::RuntimeMetadataPrefixed; -use subxt::{OnlineClient, PolkadotConfig}; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a client to use: - let api = OnlineClient::::new().await?; - - // Use runtime APIs at the latest block: - let runtime_apis = api.runtime_api().at_latest().await?; - - // Ask for metadata and decode it: - let result_bytes = runtime_apis.call_raw("Metadata_metadata", None).await?; - let (_, meta): (Compact, RuntimeMetadataPrefixed) = Decode::decode(&mut &*result_bytes)?; - - println!("{meta:?}"); - Ok(()) -} diff --git a/new/examples/runtime_apis_static.rs b/new/examples/runtime_apis_static.rs deleted file mode 100644 index 95228668e6..0000000000 --- a/new/examples/runtime_apis_static.rs +++ /dev/null @@ -1,28 +0,0 @@ -#![allow(missing_docs)] -use subxt::{OnlineClient, config::PolkadotConfig}; -use subxt_signer::sr25519::dev; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a client to use: - let api = OnlineClient::::new().await?; - - // Create a runtime API payload that calls into - // `AccountNonceApi_account_nonce` 
function. - let account = dev::alice().public_key().into(); - let runtime_api_call = polkadot::apis().account_nonce_api().account_nonce(account); - - // Submit the call and get back a result. - let nonce = api - .runtime_api() - .at_latest() - .await? - .call(runtime_api_call) - .await; - - println!("AccountNonceApi_account_nonce for Alice: {nonce:?}"); - Ok(()) -} diff --git a/new/examples/setup_client_custom_rpc.rs b/new/examples/setup_client_custom_rpc.rs deleted file mode 100644 index 47580ba32a..0000000000 --- a/new/examples/setup_client_custom_rpc.rs +++ /dev/null @@ -1,86 +0,0 @@ -#![allow(missing_docs)] -use std::{ - fmt::Write, - pin::Pin, - sync::{Arc, Mutex}, -}; -use subxt::{ - OnlineClient, PolkadotConfig, - backend::rpc::{RawRpcFuture, RawRpcSubscription, RawValue, RpcClient, RpcClientT}, -}; - -// A dummy RPC client that doesn't actually handle requests properly -// at all, but instead just logs what requests to it were made. -struct MyLoggingClient { - log: Arc>, -} - -// We have to implement this fairly low level trait to turn [`MyLoggingClient`] -// into an RPC client that we can make use of in Subxt. Here we just log the requests -// made but don't forward them to any real node, and instead just return nonsense. -impl RpcClientT for MyLoggingClient { - fn request_raw<'a>( - &'a self, - method: &'a str, - params: Option>, - ) -> RawRpcFuture<'a, Box> { - writeln!( - self.log.lock().unwrap(), - "{method}({})", - params.as_ref().map(|p| p.get()).unwrap_or("[]") - ) - .unwrap(); - - // We've logged the request; just return garbage. Because a boxed future is returned, - // you're able to run whatever async code you'd need to actually talk to a node. 
- let res = RawValue::from_string("[]".to_string()).unwrap(); - Box::pin(std::future::ready(Ok(res))) - } - - fn subscribe_raw<'a>( - &'a self, - sub: &'a str, - params: Option>, - unsub: &'a str, - ) -> RawRpcFuture<'a, RawRpcSubscription> { - writeln!( - self.log.lock().unwrap(), - "{sub}({}) (unsub: {unsub})", - params.as_ref().map(|p| p.get()).unwrap_or("[]") - ) - .unwrap(); - - // We've logged the request; just return garbage. Because a boxed future is returned, - // and that will return a boxed Stream impl, you have a bunch of flexibility to build - // and return whatever type of Stream you see fit. - let res = RawValue::from_string("[]".to_string()).unwrap(); - let stream = futures::stream::once(async move { Ok(res) }); - let stream: Pin + Send>> = Box::pin(stream); - // This subscription does not provide an ID. - Box::pin(std::future::ready(Ok(RawRpcSubscription { - stream, - id: None, - }))) - } -} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Instantiate our replacement RPC client. - let log = Arc::default(); - let rpc_client = { - let inner = MyLoggingClient { - log: Arc::clone(&log), - }; - RpcClient::new(inner) - }; - - // Pass this into our OnlineClient to instantiate it. This will lead to some - // RPC calls being made to fetch chain details/metadata, which will immediately - // fail.. 
- let _ = OnlineClient::::from_rpc_client(rpc_client).await; - - // But, we can see that the calls were made via our custom RPC client: - println!("Log of calls made:\n\n{}", log.lock().unwrap().as_str()); - Ok(()) -} diff --git a/new/examples/setup_client_offline.rs b/new/examples/setup_client_offline.rs deleted file mode 100644 index ba483f7164..0000000000 --- a/new/examples/setup_client_offline.rs +++ /dev/null @@ -1,35 +0,0 @@ -#![allow(missing_docs)] -use subxt::ext::codec::Decode; -use subxt::metadata::Metadata; -use subxt::utils::H256; -use subxt::{OfflineClient, config::PolkadotConfig}; - -#[tokio::main] -async fn main() -> Result<(), Box> { - // We need to obtain the following details for an OfflineClient to be instantiated: - - // 1. Genesis hash (RPC call: chain_getBlockHash(0)): - let genesis_hash = { - let h = "91b171bb158e2d3848fa23a9f1c25182fb8e20313b2c1eb49219da7a70ce90c3"; - let bytes = hex::decode(h).unwrap(); - H256::from_slice(&bytes) - }; - - // 2. A runtime version (system_version constant on a Substrate node has these): - let runtime_version = subxt::client::RuntimeVersion { - spec_version: 9370, - transaction_version: 20, - }; - - // 3. 
Metadata (I'll load it from the downloaded metadata, but you can use - // `subxt metadata > file.scale` to download it): - let metadata = { - let bytes = std::fs::read("./artifacts/polkadot_metadata_small.scale").unwrap(); - Metadata::decode(&mut &*bytes).unwrap() - }; - - // Create an offline client using the details obtained above: - let _api = OfflineClient::::new(genesis_hash, runtime_version, metadata); - - Ok(()) -} diff --git a/new/examples/setup_config_assethub.rs b/new/examples/setup_config_assethub.rs deleted file mode 100644 index b39f39a2dd..0000000000 --- a/new/examples/setup_config_assethub.rs +++ /dev/null @@ -1,54 +0,0 @@ -#![allow(missing_docs)] -use subxt::config::{ - Config, DefaultExtrinsicParams, DefaultExtrinsicParamsBuilder, PolkadotConfig, SubstrateConfig, -}; -use subxt_signer::sr25519::dev; - -#[subxt::subxt( - runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale", - derive_for_type( - path = "staging_xcm::v3::multilocation::MultiLocation", - derive = "Clone, codec::Encode", - recursive - ) -)] -pub mod runtime {} -use runtime::runtime_types::staging_xcm::v3::multilocation::MultiLocation; -use runtime::runtime_types::xcm::v3::junctions::Junctions; - -// We don't need to construct this at runtime, so an empty enum is appropriate. 
-pub enum AssetHubConfig {} - -impl Config for AssetHubConfig { - type AccountId = ::AccountId; - type Address = ::Address; - type Signature = ::Signature; - type Hasher = ::Hasher; - type Header = ::Header; - type ExtrinsicParams = DefaultExtrinsicParams; - // Here we use the MultiLocation from the metadata as a part of the config: - // The `ChargeAssetTxPayment` signed extension that is part of the ExtrinsicParams above, now uses the type: - type AssetId = MultiLocation; -} - -#[tokio::main] -async fn main() { - // With the config defined, we can create an extrinsic with subxt: - let client = subxt::OnlineClient::::new().await.unwrap(); - let tx_payload = runtime::tx().system().remark(b"Hello".to_vec()); - - // Build extrinsic params using an asset at this location as a tip: - let location: MultiLocation = MultiLocation { - parents: 3, - interior: Junctions::Here, - }; - let tx_config = DefaultExtrinsicParamsBuilder::::new() - .tip_of(1234, location) - .build(); - - // And provide the extrinsic params including the tip when submitting a transaction: - let _ = client - .tx() - .sign_and_submit_then_watch(&tx_payload, &dev::alice(), tx_config) - .await; -} diff --git a/new/examples/setup_config_custom.rs b/new/examples/setup_config_custom.rs deleted file mode 100644 index a4732f3f89..0000000000 --- a/new/examples/setup_config_custom.rs +++ /dev/null @@ -1,97 +0,0 @@ -#![allow(missing_docs)] -use codec::Encode; -use subxt::client::ClientState; -use subxt::config::{ - Config, ExtrinsicParams, ExtrinsicParamsEncoder, ExtrinsicParamsError, HashFor, - transaction_extensions::Params, -}; -use subxt_signer::sr25519::dev; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale")] -pub mod runtime {} - -// We don't need to construct this at runtime, -// so an empty enum is appropriate: -pub enum CustomConfig {} - -impl Config for CustomConfig { - type AccountId = subxt::utils::AccountId32; - type Address = subxt::utils::MultiAddress; - type 
Signature = subxt::utils::MultiSignature; - type Hasher = subxt::config::substrate::BlakeTwo256; - type Header = subxt::config::substrate::SubstrateHeader; - type ExtrinsicParams = CustomExtrinsicParams; - type AssetId = u32; -} - -// This represents some arbitrary (and nonsensical) custom parameters that -// will be attached to transaction extra and additional payloads: -pub struct CustomExtrinsicParams { - genesis_hash: HashFor, - tip: u128, - foo: bool, -} - -// We can provide a "pretty" interface to allow users to provide these: -#[derive(Default)] -pub struct CustomExtrinsicParamsBuilder { - tip: u128, - foo: bool, -} - -impl CustomExtrinsicParamsBuilder { - pub fn new() -> Self { - Default::default() - } - pub fn tip(mut self, value: u128) -> Self { - self.tip = value; - self - } - pub fn enable_foo(mut self) -> Self { - self.foo = true; - self - } -} - -impl Params for CustomExtrinsicParamsBuilder {} - -// Describe how to fetch and then encode the params: -impl ExtrinsicParams for CustomExtrinsicParams { - type Params = CustomExtrinsicParamsBuilder; - - // Gather together all of the params we will need to encode: - fn new(client: &ClientState, params: Self::Params) -> Result { - Ok(Self { - genesis_hash: client.genesis_hash, - tip: params.tip, - foo: params.foo, - }) - } -} - -// Encode the relevant params when asked: -impl ExtrinsicParamsEncoder for CustomExtrinsicParams { - fn encode_value_to(&self, v: &mut Vec) { - (self.tip, self.foo).encode_to(v); - } - fn encode_implicit_to(&self, v: &mut Vec) { - self.genesis_hash.encode_to(v) - } -} - -#[tokio::main] -async fn main() { - // With the config defined, it can be handed to Subxt as follows: - let client = subxt::OnlineClient::::new().await.unwrap(); - - let tx_payload = runtime::tx().system().remark(b"Hello".to_vec()); - - // Build your custom "Params": - let tx_config = CustomExtrinsicParamsBuilder::new().tip(1234).enable_foo(); - - // And provide them when submitting a transaction: - let _ = client - 
.tx() - .sign_and_submit_then_watch(&tx_payload, &dev::alice(), tx_config) - .await; -} diff --git a/new/examples/setup_config_transaction_extension.rs b/new/examples/setup_config_transaction_extension.rs deleted file mode 100644 index f0fcc58894..0000000000 --- a/new/examples/setup_config_transaction_extension.rs +++ /dev/null @@ -1,106 +0,0 @@ -#![allow(missing_docs)] -use codec::Encode; -use scale_encode::EncodeAsType; -use scale_info::PortableRegistry; -use subxt::client::ClientState; -use subxt::config::transaction_extensions; -use subxt::config::{ - Config, DefaultExtrinsicParamsBuilder, ExtrinsicParams, ExtrinsicParamsEncoder, - ExtrinsicParamsError, -}; -use subxt_signer::sr25519::dev; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod runtime {} - -// We don't need to construct this at runtime, -// so an empty enum is appropriate: -#[derive(EncodeAsType)] -pub enum CustomConfig {} - -impl Config for CustomConfig { - type AccountId = subxt::utils::AccountId32; - type Address = subxt::utils::MultiAddress; - type Signature = subxt::utils::MultiSignature; - type Hasher = subxt::config::substrate::BlakeTwo256; - type Header = subxt::config::substrate::SubstrateHeader; - type ExtrinsicParams = transaction_extensions::AnyOf< - Self, - ( - // Load in the existing signed extensions we're interested in - // (if the extension isn't actually needed it'll just be ignored): - transaction_extensions::VerifySignature, - transaction_extensions::CheckSpecVersion, - transaction_extensions::CheckTxVersion, - transaction_extensions::CheckNonce, - transaction_extensions::CheckGenesis, - transaction_extensions::CheckMortality, - transaction_extensions::ChargeAssetTxPayment, - transaction_extensions::ChargeTransactionPayment, - transaction_extensions::CheckMetadataHash, - // And add a new one of our own: - CustomTransactionExtension, - ), - >; - type AssetId = u32; -} - -// Our custom signed extension doesn't do much: -pub struct 
CustomTransactionExtension; - -// Give the extension a name; this allows `AnyOf` to look it -// up in the chain metadata in order to know when and if to use it. -impl transaction_extensions::TransactionExtension for CustomTransactionExtension { - type Decoded = (); - fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { - identifier == "CustomTransactionExtension" - } -} - -// Gather together any params we need for our signed extension, here none. -impl ExtrinsicParams for CustomTransactionExtension { - type Params = (); - - fn new(_client: &ClientState, _params: Self::Params) -> Result { - Ok(CustomTransactionExtension) - } -} - -// Encode whatever the extension needs to provide when asked: -impl ExtrinsicParamsEncoder for CustomTransactionExtension { - fn encode_value_to(&self, v: &mut Vec) { - "Hello".encode_to(v); - } - fn encode_implicit_to(&self, v: &mut Vec) { - true.encode_to(v) - } -} - -// When composing a tuple of signed extensions, the user parameters we need must -// be able to convert `Into` a tuple of corresponding `Params`. Here, we just -// "hijack" the default param builder, but add the `Params` (`()`) for our -// new signed extension at the end, to make the types line up. IN reality you may wish -// to construct an entirely new interface to provide the relevant `Params`. 
-pub fn custom( - params: DefaultExtrinsicParamsBuilder, -) -> <::ExtrinsicParams as ExtrinsicParams>::Params { - let (a, b, c, d, e, f, g, h, i) = params.build(); - (a, b, c, d, e, f, g, h, i, ()) -} - -#[tokio::main] -async fn main() { - // With the config defined, it can be handed to Subxt as follows: - let client = subxt::OnlineClient::::new().await.unwrap(); - - let tx_payload = runtime::tx().system().remark(b"Hello".to_vec()); - - // Configure the tx params: - let tx_config = DefaultExtrinsicParamsBuilder::new().tip(1234); - - // And provide them when submitting a transaction: - let _ = client - .tx() - .sign_and_submit_then_watch(&tx_payload, &dev::alice(), custom(tx_config)) - .await; -} diff --git a/new/examples/setup_reconnecting_rpc_client.rs b/new/examples/setup_reconnecting_rpc_client.rs deleted file mode 100644 index a3763947c7..0000000000 --- a/new/examples/setup_reconnecting_rpc_client.rs +++ /dev/null @@ -1,77 +0,0 @@ -//! Example to utilize the `reconnecting rpc client` in subxt -//! which hidden behind behind `--feature reconnecting-rpc-client` -//! -//! To utilize full logs from the RPC client use: -//! `RUST_LOG="jsonrpsee=trace,subxt-reconnecting-rpc-client=trace"` - -#![allow(missing_docs)] - -use std::time::Duration; - -use futures::StreamExt; -use subxt::backend::rpc::reconnecting_rpc_client::{ExponentialBackoff, RpcClient}; -use subxt::{OnlineClient, PolkadotConfig}; - -// Generate an interface that we can use from the node's metadata. -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - tracing_subscriber::fmt::init(); - - // Create a new client with a reconnecting RPC client. - let rpc = RpcClient::builder() - // Reconnect with exponential backoff - // - // This API is "iterator-like" and we use `take` to limit the number of retries. 
- .retry_policy( - ExponentialBackoff::from_millis(100) - .max_delay(Duration::from_secs(10)) - .take(3), - ) - // There are other configurations as well that can be found at [`reconnecting_rpc_client::ClientBuilder`]. - .build("ws://localhost:9944".to_string()) - .await?; - - // If you want to use the chainhead backend with the reconnecting RPC client, you can do so like this: - // - // ``` - // use subxt::backend::chain_head:ChainHeadBackend; - // use subxt::OnlineClient; - // - // let backend = ChainHeadBackend::builder().build_with_background_task(RpcClient::new(rpc.clone())); - // let api: OnlineClient = OnlineClient::from_backend(Arc::new(backend)).await?; - // ``` - - let api: OnlineClient = OnlineClient::from_rpc_client(rpc.clone()).await?; - - // Run for at most 100 blocks and print a bunch of information about it. - // - // The subscription is automatically re-started when the RPC client has reconnected. - // You can test that by stopping the polkadot node and restarting it. - let mut blocks_sub = api.blocks().subscribe_finalized().await?.take(100); - - while let Some(block) = blocks_sub.next().await { - let block = match block { - Ok(b) => b, - Err(e) => { - // This can only happen on the legacy backend and the unstable backend - // will handle this internally. - if e.is_disconnected_will_reconnect() { - println!("The RPC connection was lost and we may have missed a few blocks"); - continue; - } - - return Err(e.into()); - } - }; - - let block_number = block.number(); - let block_hash = block.hash(); - - println!("Block #{block_number} ({block_hash})"); - } - - Ok(()) -} diff --git a/new/examples/setup_rpc_chainhead_backend.rs b/new/examples/setup_rpc_chainhead_backend.rs deleted file mode 100644 index 37da5fce19..0000000000 --- a/new/examples/setup_rpc_chainhead_backend.rs +++ /dev/null @@ -1,35 +0,0 @@ -//! Example to utilize the ChainHeadBackend rpc backend to subscribe to finalized blocks. 
- -#![allow(missing_docs)] - -use futures::StreamExt; -use subxt::backend::chain_head::{ChainHeadBackend, ChainHeadBackendBuilder}; -use subxt::backend::rpc::RpcClient; -use subxt::{OnlineClient, PolkadotConfig}; - -// Generate an interface that we can use from the node's metadata. -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - tracing_subscriber::fmt::init(); - - let rpc = RpcClient::from_url("ws://localhost:9944".to_string()).await?; - let backend: ChainHeadBackend = - ChainHeadBackendBuilder::default().build_with_background_driver(rpc.clone()); - let api = OnlineClient::from_backend(std::sync::Arc::new(backend)).await?; - - let mut blocks_sub = api.blocks().subscribe_finalized().await?.take(100); - - while let Some(block) = blocks_sub.next().await { - let block = block?; - - let block_number = block.number(); - let block_hash = block.hash(); - - println!("Block #{block_number} ({block_hash})"); - } - - Ok(()) -} diff --git a/new/examples/storage_fetch.rs b/new/examples/storage_fetch.rs deleted file mode 100644 index 1fe491898c..0000000000 --- a/new/examples/storage_fetch.rs +++ /dev/null @@ -1,32 +0,0 @@ -#![allow(missing_docs)] -use subxt::{OnlineClient, PolkadotConfig}; -use subxt_signer::sr25519::dev; - -// Generate an interface that we can use from the node's metadata. -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a new API client, configured to talk to Polkadot nodes. - let api = OnlineClient::::new().await?; - let account = dev::alice().public_key().into(); - - // Build a storage query to access account information. - let storage_query = polkadot::storage().system().account(); - - // Use that query to access a storage entry, fetch a result and decode the value. 
- // The static address knows that fetching requires a tuple of one value, an - // AccountId32. - let client_at = api.storage().at_latest().await?; - let account_info = client_at - .entry(storage_query)? - .fetch((account,)) - .await? - .decode()?; - - // The static address that we got from the subxt macro knows the expected input - // and return types, so it is decoded into a static type for us. - println!("Alice: {account_info:?}"); - Ok(()) -} diff --git a/new/examples/storage_fetch_dynamic.rs b/new/examples/storage_fetch_dynamic.rs deleted file mode 100644 index 61a81fef98..0000000000 --- a/new/examples/storage_fetch_dynamic.rs +++ /dev/null @@ -1,34 +0,0 @@ -#![allow(missing_docs)] -use subxt::dynamic::{At, Value}; -use subxt::utils::AccountId32; -use subxt::{OnlineClient, PolkadotConfig}; -use subxt_signer::sr25519::dev; - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a new API client, configured to talk to Polkadot nodes. - let api = OnlineClient::::new().await?; - - // Build a dynamic storage query to access account information. - // here, we assume that there is one value to provide at this entry - // to access a value; an AccountId32. In this example we don't know the - // return type and so we set it to `Value`, which anything can decode into. - let account: AccountId32 = dev::alice().public_key().into(); - let storage_query = subxt::dynamic::storage::<(AccountId32,), Value>("System", "Account"); - - // Use that query to access a storage entry, fetch a result and decode the value. - let client_at = api.storage().at_latest().await?; - let account_info = client_at - .entry(storage_query)? - .fetch((account,)) - .await? - .decode()?; - - // With out `Value` type we can dig in to find what we want using the `At` - // trait and `.at()` method that this provides on the Value. 
- println!( - "Alice has free balance: {}", - account_info.at("data").at("free").unwrap() - ); - Ok(()) -} diff --git a/new/examples/storage_iterating.rs b/new/examples/storage_iterating.rs deleted file mode 100644 index 3ff74029bd..0000000000 --- a/new/examples/storage_iterating.rs +++ /dev/null @@ -1,42 +0,0 @@ -#![allow(missing_docs)] -use subxt::ext::futures::StreamExt; -use subxt::{OnlineClient, PolkadotConfig}; - -// Generate an interface that we can use from the node's metadata. -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a new API client, configured to talk to Polkadot nodes. - let api = OnlineClient::::new().await?; - - // Build a storage query to access account information. Same as if we were - // fetching a single value from this entry. - let storage_query = polkadot::storage().system().account(); - - // Use that query to access a storage entry, iterate over it and decode values. - let client_at = api.storage().at_latest().await?; - - // We provide an empty tuple when iterating. If the storage entry had been an N map with - // multiple keys, then we could provide any prefix of those keys to iterate over. This is - // statically type checked, so only a valid number/type of keys in the tuple is accepted. - let mut values = client_at.entry(storage_query)?.iter(()).await?; - - while let Some(kv) = values.next().await { - let kv = kv?; - - // The key decodes into the type that the static address knows about, in this case a - // tuple of one entry, because the only part of the key that we can decode is the - // AccountId32 for each user. - let (account_id32,) = kv.key()?.decode()?; - - // The value decodes into a statically generated type which holds account information. 
- let value = kv.value().decode()?; - - let value_data = value.data; - println!("{account_id32}:\n {value_data:?}"); - } - - Ok(()) -} diff --git a/new/examples/storage_iterating_dynamic.rs b/new/examples/storage_iterating_dynamic.rs deleted file mode 100644 index 443c977eef..0000000000 --- a/new/examples/storage_iterating_dynamic.rs +++ /dev/null @@ -1,42 +0,0 @@ -#![allow(missing_docs)] -use subxt::ext::futures::StreamExt; -use subxt::utils::AccountId32; -use subxt::{ - OnlineClient, PolkadotConfig, - dynamic::{At, Value}, -}; - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a new API client, configured to talk to Polkadot nodes. - let api = OnlineClient::::new().await?; - - // Build a dynamic storage query to access account information. - // here, we assume that there is one value to provide at this entry - // to access a value; an AccountId32. In this example we don't know the - // return type and so we set it to `Value`, which anything can decode into. - let storage_query = subxt::dynamic::storage::<(AccountId32,), Value>("System", "Account"); - - // Use that query to access a storage entry, iterate over it and decode values. - let client_at = api.storage().at_latest().await?; - let mut values = client_at.entry(storage_query)?.iter(()).await?; - - while let Some(kv) = values.next().await { - let kv = kv?; - - // The key decodes into the first type we provided in the address. Since there's just - // one key, it is a tuple of one entry, an AccountId32. If we didn't know how many - // keys or their type, we could set the key to `Vec` instead. - let (account_id32,) = kv.key()?.decode()?; - - // The value decodes into the second type we provided in the address. In this example, - // we just decode it into our `Value` type and then look at the "data" field in this - // (which implicitly assumes we get a struct shaped thing back with such a field). 
- let value = kv.value().decode()?; - - let value_data = value.at("data").unwrap(); - println!("{account_id32}:\n {value_data}"); - } - - Ok(()) -} diff --git a/new/examples/substrate_compat_signer.rs b/new/examples/substrate_compat_signer.rs deleted file mode 100644 index 968adffe76..0000000000 --- a/new/examples/substrate_compat_signer.rs +++ /dev/null @@ -1,117 +0,0 @@ -//! This example demonstrates how to use to add a custom signer implementation to `subxt` -//! by using the signer implementation from polkadot-sdk. -//! -//! Similar functionality was provided by the `substrate-compat` feature in the original `subxt` crate. -//! which is now removed. - -#![allow(missing_docs, unused)] - -use sp_core::{Pair as _, sr25519}; -use subxt::config::substrate::MultiAddress; -use subxt::{Config, OnlineClient, PolkadotConfig}; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -/// A concrete PairSigner implementation which relies on `sr25519::Pair` for signing -/// and that PolkadotConfig is the runtime configuration. -mod pair_signer { - use super::*; - use sp_runtime::{ - MultiSignature as SpMultiSignature, - traits::{IdentifyAccount, Verify}, - }; - use subxt::{ - config::substrate::{AccountId32, MultiSignature}, - tx::Signer, - }; - - /// A [`Signer`] implementation for [`sp_core::sr25519::Pair`]. - #[derive(Clone)] - pub struct PairSigner { - account_id: ::AccountId, - signer: sr25519::Pair, - } - - impl PairSigner { - /// Creates a new [`Signer`] from an [`sp_core::sr25519::Pair`]. - pub fn new(signer: sr25519::Pair) -> Self { - let account_id = - ::Signer::from(signer.public()).into_account(); - Self { - // Convert `sp_core::AccountId32` to `subxt::config::substrate::AccountId32`. - // - // This is necessary because we use `subxt::config::substrate::AccountId32` and no - // From/Into impls are provided between `sp_core::AccountId32` because `polkadot-sdk` isn't a direct - // dependency in subxt. 
- // - // This can also be done by provided a wrapper type around `subxt::config::substrate::AccountId32` to implement - // such conversions but that also most likely requires a custom `Config` with a separate `AccountId` type to work - // properly without additional hacks. - account_id: AccountId32(account_id.into()), - signer, - } - } - - /// Returns the [`sp_core::sr25519::Pair`] implementation used to construct this. - pub fn signer(&self) -> &sr25519::Pair { - &self.signer - } - - /// Return the account ID. - pub fn account_id(&self) -> &AccountId32 { - &self.account_id - } - } - - impl Signer for PairSigner { - fn account_id(&self) -> ::AccountId { - self.account_id.clone() - } - - fn sign(&self, signer_payload: &[u8]) -> ::Signature { - let signature = self.signer.sign(signer_payload); - MultiSignature::Sr25519(signature.0) - } - } -} - -#[tokio::main] -async fn main() -> Result<(), Box> { - tracing_subscriber::fmt::init(); - - // Create a new API client, configured to talk to Polkadot nodes. - let api = OnlineClient::::new().await?; - - let signer = { - let acc = sr25519::Pair::from_string("//Alice", None)?; - pair_signer::PairSigner::new(acc) - }; - - let dest = { - let acc = sr25519::Pair::from_string("//Bob", None)?; - MultiAddress::Address32(acc.public().0) - }; - - // Build a balance transfer extrinsic. - let balance_transfer_tx = polkadot::tx() - .balances() - .transfer_allow_death(dest, 100_000); - - // Submit the balance transfer extrinsic from Alice, and wait for it to be successful - // and in a finalized block. We get back the extrinsic events if all is well. - let events = api - .tx() - .sign_and_submit_then_watch_default(&balance_transfer_tx, &signer) - .await? - .wait_for_finalized_success() - .await?; - - // Find a Transfer event and print it. 
- let transfer_event = events.find_first::()?; - if let Some(event) = transfer_event { - println!("Balance transfer success: {event:?}"); - } - - Ok(()) -} diff --git a/new/examples/tx_basic_frontier.rs b/new/examples/tx_basic_frontier.rs deleted file mode 100644 index 23b577a055..0000000000 --- a/new/examples/tx_basic_frontier.rs +++ /dev/null @@ -1,56 +0,0 @@ -//! Example to use subxt to talk to substrate-based nodes with ethereum accounts -//! which is not the default for subxt which is why we need to provide a custom config. -//! -//! This example requires to run a local frontier/moonbeam node to work. - -#![allow(missing_docs)] - -use subxt::OnlineClient; -use subxt_core::utils::AccountId20; -use subxt_signer::eth::{Signature, dev}; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/frontier_metadata_small.scale")] -mod eth_runtime {} - -enum EthRuntimeConfig {} - -impl subxt::Config for EthRuntimeConfig { - type AccountId = AccountId20; - type Address = AccountId20; - type Signature = Signature; - type Hasher = subxt::config::substrate::BlakeTwo256; - type Header = - subxt::config::substrate::SubstrateHeader; - type ExtrinsicParams = subxt::config::SubstrateExtrinsicParams; - type AssetId = u32; -} - -#[tokio::main] -async fn main() -> Result<(), Box> { - let api = OnlineClient::::from_insecure_url("ws://127.0.0.1:9944").await?; - - let alith = dev::alith(); - let baltathar = dev::baltathar(); - let dest = baltathar.public_key().to_account_id(); - - println!("baltathar pub: {}", hex::encode(baltathar.public_key().0)); - println!("baltathar addr: {}", hex::encode(dest)); - - let balance_transfer_tx = eth_runtime::tx() - .balances() - .transfer_allow_death(dest, 10_001); - - let events = api - .tx() - .sign_and_submit_then_watch_default(&balance_transfer_tx, &alith) - .await? 
- .wait_for_finalized_success() - .await?; - - let transfer_event = events.find_first::()?; - if let Some(event) = transfer_event { - println!("Balance transfer success: {event:?}"); - } - - Ok(()) -} diff --git a/new/examples/tx_boxed.rs b/new/examples/tx_boxed.rs deleted file mode 100644 index 0dd4c4d2e0..0000000000 --- a/new/examples/tx_boxed.rs +++ /dev/null @@ -1,43 +0,0 @@ -#![allow(missing_docs)] -use subxt::{OnlineClient, PolkadotConfig}; -use subxt_signer::sr25519::dev; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - let api = OnlineClient::::new().await?; - - // Prepare some extrinsics. These are boxed so that they can live alongside each other. - let txs = [dynamic_remark(), balance_transfer(), remark()]; - - for tx in txs { - let from = dev::alice(); - api.tx() - .sign_and_submit_then_watch_default(&tx, &from) - .await? - .wait_for_finalized_success() - .await?; - - println!("Submitted tx"); - } - - Ok(()) -} - -fn balance_transfer() -> Box { - let dest = dev::bob().public_key().into(); - Box::new(polkadot::tx().balances().transfer_allow_death(dest, 10_000)) -} - -fn remark() -> Box { - Box::new(polkadot::tx().system().remark(vec![1, 2, 3, 4, 5])) -} - -fn dynamic_remark() -> Box { - use subxt::dynamic::{Value, tx}; - let tx_payload = tx("System", "remark", vec![Value::from_bytes("Hello")]); - - Box::new(tx_payload) -} diff --git a/new/examples/tx_partial.rs b/new/examples/tx_partial.rs deleted file mode 100644 index 0684091de6..0000000000 --- a/new/examples/tx_partial.rs +++ /dev/null @@ -1,53 +0,0 @@ -#![allow(missing_docs)] -use subxt::{OnlineClient, PolkadotConfig}; -use subxt_signer::sr25519::dev; - -type BoxedError = Box; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), BoxedError> { - // Spawned tasks require things held 
across await points to impl Send, - // so we use one to demonstrate that this is possible with `PartialTransaction` - tokio::spawn(signing_example()).await??; - Ok(()) -} - -async fn signing_example() -> Result<(), BoxedError> { - let api = OnlineClient::::new().await?; - - // Build a balance transfer extrinsic. - let dest = dev::bob().public_key().into(); - let balance_transfer_tx = polkadot::tx().balances().transfer_allow_death(dest, 10_000); - - let alice = dev::alice(); - - // Create partial tx, ready to be signed. - let mut partial_tx = api - .tx() - .create_partial( - &balance_transfer_tx, - &alice.public_key().to_account_id(), - Default::default(), - ) - .await?; - - // Simulate taking some time to get a signature back, in part to - // show that the `PartialTransaction` can be held across await points. - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - let signature = alice.sign(&partial_tx.signer_payload()); - - // Sign the transaction. - let tx = partial_tx - .sign_with_account_and_signature(&alice.public_key().to_account_id(), &signature.into()); - - // Submit it. - tx.submit_and_watch() - .await? - .wait_for_finalized_success() - .await?; - - Ok(()) -} diff --git a/new/examples/tx_status_stream.rs b/new/examples/tx_status_stream.rs deleted file mode 100644 index cdd55c4e82..0000000000 --- a/new/examples/tx_status_stream.rs +++ /dev/null @@ -1,55 +0,0 @@ -#![allow(missing_docs)] -use subxt::{OnlineClient, PolkadotConfig, tx::TxStatus}; -use subxt_signer::sr25519::dev; - -// Generate an interface that we can use from the node's metadata. -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a new API client, configured to talk to Polkadot nodes. - let api = OnlineClient::::new().await?; - - // Build a balance transfer extrinsic. 
- let dest = dev::bob().public_key().into(); - let balance_transfer_tx = polkadot::tx().balances().transfer_allow_death(dest, 10_000); - - // Submit the balance transfer extrinsic from Alice, and then monitor the - // progress of it. - let from = dev::alice(); - let mut balance_transfer_progress = api - .tx() - .sign_and_submit_then_watch_default(&balance_transfer_tx, &from) - .await?; - - while let Some(status) = balance_transfer_progress.next().await { - match status? { - // It's finalized in a block! - TxStatus::InFinalizedBlock(in_block) => { - println!( - "Transaction {:?} is finalized in block {:?}", - in_block.extrinsic_hash(), - in_block.block_hash() - ); - - // grab the events and fail if no ExtrinsicSuccess event seen: - let events = in_block.wait_for_success().await?; - // We can look for events (this uses the static interface; we can also iterate - // over them and dynamically decode them): - let transfer_event = events.find_first::()?; - - if let Some(event) = transfer_event { - println!("Balance transfer success: {event:?}"); - } else { - println!("Failed to find Balances::Transfer Event"); - } - } - // Just log any other status we encounter: - other => { - println!("Status: {other:?}"); - } - } - } - Ok(()) -} diff --git a/new/examples/tx_with_params.rs b/new/examples/tx_with_params.rs deleted file mode 100644 index 00126a7f9f..0000000000 --- a/new/examples/tx_with_params.rs +++ /dev/null @@ -1,28 +0,0 @@ -#![allow(missing_docs)] -use subxt::config::polkadot::PolkadotExtrinsicParamsBuilder as Params; -use subxt::{OnlineClient, PolkadotConfig}; -use subxt_signer::sr25519::dev; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a new API client, configured to talk to Polkadot nodes. - let api = OnlineClient::::new().await?; - - // Build a balance transfer extrinsic. 
- let dest = dev::bob().public_key().into(); - let tx = polkadot::tx().balances().transfer_allow_death(dest, 10_000); - - // Configure the transaction parameters; we give a small tip and set the - // transaction to live for 32 blocks from the `latest_block` above. - let tx_params = Params::new().tip(1_000).mortal(32).build(); - - // submit the transaction: - let from = dev::alice(); - let hash = api.tx().sign_and_submit(&tx, &from, tx_params).await?; - println!("Balance transfer extrinsic submitted with hash : {hash}"); - - Ok(()) -} diff --git a/new/src/backend/chain_head/follow_stream.rs b/new/src/backend/chain_head/follow_stream.rs deleted file mode 100644 index b763a6270b..0000000000 --- a/new/src/backend/chain_head/follow_stream.rs +++ /dev/null @@ -1,337 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use crate::config::{Config, HashFor, RpcConfigFor}; -use crate::error::BackendError; -use futures::{FutureExt, Stream, StreamExt, TryStreamExt}; -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; -use subxt_rpcs::methods::chain_head::{ChainHeadRpcMethods, FollowEvent}; - -/// A `Stream` whose goal is to remain subscribed to `chainHead_follow`. It will re-subscribe if the subscription -/// is ended for any reason, and it will return the current `subscription_id` as an event, along with the other -/// follow events. -pub struct FollowStream { - // Using this and not just keeping a copy of the RPC methods - // around means that we can test this in isolation with dummy streams. 
- stream_getter: FollowEventStreamGetter, - stream: InnerStreamState, -} - -impl std::fmt::Debug for FollowStream { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("FollowStream") - .field("stream_getter", &"..") - .field("stream", &self.stream) - .finish() - } -} - -/// A getter function that returns an [`FollowEventStreamFut`]. -pub type FollowEventStreamGetter = Box FollowEventStreamFut + Send>; - -/// The future which will return a stream of follow events and the subscription ID for it. -pub type FollowEventStreamFut = Pin< - Box< - dyn Future, String), BackendError>> - + Send - + 'static, - >, ->; - -/// The stream of follow events. -pub type FollowEventStream = - Pin, BackendError>> + Send + 'static>>; - -/// Either a ready message with the current subscription ID, or -/// an event from the stream itself. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum FollowStreamMsg { - /// The stream is ready (and has a subscription ID) - Ready(String), - /// An event from the stream. - Event(FollowEvent), -} - -impl FollowStreamMsg { - /// Return an event, or none if the message is a "ready" one. - pub fn into_event(self) -> Option> { - match self { - FollowStreamMsg::Ready(_) => None, - FollowStreamMsg::Event(e) => Some(e), - } - } -} - -enum InnerStreamState { - /// We've just created the stream; we'll start Initializing it - New, - /// We're fetching the inner subscription. Move to Ready when we have one. - Initializing(FollowEventStreamFut), - /// Report back the subscription ID here, and then start ReceivingEvents. - Ready(Option<(FollowEventStream, String)>), - /// We are polling for, and receiving events from the stream. - ReceivingEvents(FollowEventStream), - /// We received a stop event. We'll send one on and restart the stream. - Stopped, - /// The stream is finished and will not restart (likely due to an error). 
- Finished, -} - -impl std::fmt::Debug for InnerStreamState { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::New => write!(f, "New"), - Self::Initializing(_) => write!(f, "Initializing(..)"), - Self::Ready(_) => write!(f, "Ready(..)"), - Self::ReceivingEvents(_) => write!(f, "ReceivingEvents(..)"), - Self::Stopped => write!(f, "Stopped"), - Self::Finished => write!(f, "Finished"), - } - } -} - -impl FollowStream { - /// Create a new [`FollowStream`] given a function which returns the stream. - pub fn new(stream_getter: FollowEventStreamGetter) -> Self { - Self { - stream_getter, - stream: InnerStreamState::New, - } - } - - /// Create a new [`FollowStream`] given the RPC methods. - pub fn from_methods( - methods: ChainHeadRpcMethods>, - ) -> FollowStream> { - FollowStream { - stream_getter: Box::new(move || { - let methods = methods.clone(); - Box::pin(async move { - // Make the RPC call: - let stream = methods.chainhead_v1_follow(true).await?; - // Extract the subscription ID: - let Some(sub_id) = stream.subscription_id().map(ToOwned::to_owned) else { - return Err(BackendError::other( - "Subscription ID expected for chainHead_follow response, but not given", - )); - }; - // Map stream errors into the higher level subxt one: - let stream = stream.map_err(|e| e.into()); - let stream: FollowEventStream> = Box::pin(stream); - // Return both: - Ok((stream, sub_id)) - }) - }), - stream: InnerStreamState::New, - } - } -} - -impl std::marker::Unpin for FollowStream {} - -impl Stream for FollowStream { - type Item = Result, BackendError>; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.get_mut(); - - loop { - match &mut this.stream { - InnerStreamState::New => { - let fut = (this.stream_getter)(); - this.stream = InnerStreamState::Initializing(fut); - continue; - } - InnerStreamState::Initializing(fut) => { - match fut.poll_unpin(cx) { - Poll::Pending => { - return Poll::Pending; - } - 
Poll::Ready(Ok(sub_with_id)) => { - this.stream = InnerStreamState::Ready(Some(sub_with_id)); - continue; - } - Poll::Ready(Err(e)) => { - // Re-start if a reconnecting backend was enabled. - if e.is_disconnected_will_reconnect() { - this.stream = InnerStreamState::Stopped; - continue; - } - - // Finish forever if there's an error, passing it on. - this.stream = InnerStreamState::Finished; - return Poll::Ready(Some(Err(e))); - } - } - } - InnerStreamState::Ready(stream) => { - // We never set the Option to `None`; we just have an Option so - // that we can take ownership of the contents easily here. - let (sub, sub_id) = stream.take().expect("should always be Some"); - this.stream = InnerStreamState::ReceivingEvents(sub); - return Poll::Ready(Some(Ok(FollowStreamMsg::Ready(sub_id)))); - } - InnerStreamState::ReceivingEvents(stream) => { - match stream.poll_next_unpin(cx) { - Poll::Pending => { - return Poll::Pending; - } - Poll::Ready(None) => { - // No error happened but the stream ended; restart and - // pass on a Stop message anyway. - this.stream = InnerStreamState::Stopped; - continue; - } - Poll::Ready(Some(Ok(ev))) => { - if let FollowEvent::Stop = ev { - // A stop event means the stream has ended, so start - // over after passing on the stop message. - this.stream = InnerStreamState::Stopped; - continue; - } - return Poll::Ready(Some(Ok(FollowStreamMsg::Event(ev)))); - } - Poll::Ready(Some(Err(e))) => { - // Re-start if a reconnecting backend was enabled. - if e.is_disconnected_will_reconnect() { - this.stream = InnerStreamState::Stopped; - continue; - } - - // Finish forever if there's an error, passing it on. 
- this.stream = InnerStreamState::Finished; - return Poll::Ready(Some(Err(e))); - } - } - } - InnerStreamState::Stopped => { - this.stream = InnerStreamState::New; - return Poll::Ready(Some(Ok(FollowStreamMsg::Event(FollowEvent::Stop)))); - } - InnerStreamState::Finished => { - return Poll::Ready(None); - } - } - } - } -} - -#[cfg(test)] -pub(super) mod test_utils { - use super::*; - use crate::config::substrate::H256; - use std::sync::Arc; - use std::sync::atomic::{AtomicUsize, Ordering}; - use subxt_rpcs::methods::chain_head::{BestBlockChanged, Finalized, Initialized, NewBlock}; - - /// Given some events, returns a follow stream getter that we can use in - /// place of the usual RPC method. - pub fn test_stream_getter(events: F) -> FollowEventStreamGetter - where - Hash: Send + 'static, - F: Fn() -> I + Send + 'static, - I: IntoIterator, BackendError>>, - { - let start_idx = Arc::new(AtomicUsize::new(0)); - - Box::new(move || { - // Start the events from where we left off last time. - let start_idx = start_idx.clone(); - let this_idx = start_idx.load(Ordering::Relaxed); - let events: Vec<_> = events().into_iter().skip(this_idx).collect(); - - Box::pin(async move { - // Increment start_idx for each event we see, so that if we get - // the stream again, we get only the remaining events for it. 
- let stream = futures::stream::iter(events).map(move |ev| { - start_idx.fetch_add(1, Ordering::Relaxed); - ev - }); - - let stream: FollowEventStream = Box::pin(stream); - Ok((stream, format!("sub_id_{this_idx}"))) - }) - }) - } - - /// An initialized event - pub fn ev_initialized(n: u64) -> FollowEvent { - FollowEvent::Initialized(Initialized { - finalized_block_hashes: vec![H256::from_low_u64_le(n)], - finalized_block_runtime: None, - }) - } - - /// A new block event - pub fn ev_new_block(parent_n: u64, n: u64) -> FollowEvent { - FollowEvent::NewBlock(NewBlock { - parent_block_hash: H256::from_low_u64_le(parent_n), - block_hash: H256::from_low_u64_le(n), - new_runtime: None, - }) - } - - /// A best block event - pub fn ev_best_block(n: u64) -> FollowEvent { - FollowEvent::BestBlockChanged(BestBlockChanged { - best_block_hash: H256::from_low_u64_le(n), - }) - } - - /// A finalized event - pub fn ev_finalized( - finalized_ns: impl IntoIterator, - pruned_ns: impl IntoIterator, - ) -> FollowEvent { - FollowEvent::Finalized(Finalized { - finalized_block_hashes: finalized_ns - .into_iter() - .map(H256::from_low_u64_le) - .collect(), - pruned_block_hashes: pruned_ns.into_iter().map(H256::from_low_u64_le).collect(), - }) - } -} - -#[cfg(test)] -pub mod test { - use super::*; - use test_utils::{ev_initialized, ev_new_block, test_stream_getter}; - - #[tokio::test] - async fn follow_stream_provides_messages_until_error() { - // The events we'll get back on the stream. - let stream_getter = test_stream_getter(|| { - [ - Ok(ev_initialized(1)), - // Stop should lead to a drop and resubscribe: - Ok(FollowEvent::Stop), - Ok(FollowEvent::Stop), - Ok(ev_new_block(1, 2)), - // Nothing should be emitted after an error: - Err(BackendError::other("ended")), - Ok(ev_new_block(2, 3)), - ] - }); - - let s = FollowStream::new(stream_getter); - let out: Vec<_> = s.filter_map(async |e| e.ok()).collect().await; - - // The expected response, given the above. 
- assert_eq!( - out, - vec![ - FollowStreamMsg::Ready("sub_id_0".to_owned()), - FollowStreamMsg::Event(ev_initialized(1)), - FollowStreamMsg::Event(FollowEvent::Stop), - FollowStreamMsg::Ready("sub_id_2".to_owned()), - FollowStreamMsg::Event(FollowEvent::Stop), - FollowStreamMsg::Ready("sub_id_3".to_owned()), - FollowStreamMsg::Event(ev_new_block(1, 2)), - ] - ); - } -} diff --git a/new/src/backend/chain_head/follow_stream_driver.rs b/new/src/backend/chain_head/follow_stream_driver.rs deleted file mode 100644 index 0324f5ea35..0000000000 --- a/new/src/backend/chain_head/follow_stream_driver.rs +++ /dev/null @@ -1,755 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use super::follow_stream_unpin::{BlockRef, FollowStreamMsg, FollowStreamUnpin}; -use crate::config::Hash; -use crate::error::{BackendError, RpcError}; -use futures::stream::{Stream, StreamExt}; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::ops::DerefMut; -use std::pin::Pin; -use std::sync::{Arc, Mutex}; -use std::task::{Context, Poll, Waker}; -use subxt_rpcs::methods::chain_head::{FollowEvent, Initialized, RuntimeEvent}; - -/// A `Stream` which builds on `FollowStreamDriver`, and allows multiple subscribers to obtain events -/// from the single underlying subscription (each being provided an `Initialized` message and all new -/// blocks since then, as if they were each creating a unique `chainHead_follow` subscription). This -/// is the "top" layer of our follow stream subscriptions, and the one that's interacted with elsewhere. -#[derive(Debug)] -pub struct FollowStreamDriver { - inner: FollowStreamUnpin, - shared: Shared, -} - -impl FollowStreamDriver { - /// Create a new [`FollowStreamDriver`]. This must be polled by some executor - /// in order for any progress to be made. Things can subscribe to events. 
- pub fn new(follow_unpin: FollowStreamUnpin) -> Self { - Self { - inner: follow_unpin, - shared: Shared::default(), - } - } - - /// Return a handle from which we can create new subscriptions to follow events. - pub fn handle(&self) -> FollowStreamDriverHandle { - FollowStreamDriverHandle { - shared: self.shared.clone(), - } - } -} - -impl Stream for FollowStreamDriver { - type Item = Result<(), BackendError>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.inner.poll_next_unpin(cx) { - Poll::Pending => Poll::Pending, - Poll::Ready(None) => { - // Mark ourselves as done so that everything can end. - self.shared.done(); - Poll::Ready(None) - } - Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))), - Poll::Ready(Some(Ok(item))) => { - // Push item to any subscribers. - self.shared.push_item(item); - Poll::Ready(Some(Ok(()))) - } - } - } -} - -/// A handle that can be used to create subscribers, but that doesn't -/// itself subscribe to events. -#[derive(Debug, Clone)] -pub struct FollowStreamDriverHandle { - shared: Shared, -} - -impl FollowStreamDriverHandle { - /// Subscribe to follow events. - pub fn subscribe(&self) -> FollowStreamDriverSubscription { - self.shared.subscribe() - } -} - -/// A subscription to events from the [`FollowStreamDriver`]. All subscriptions -/// begin first with a `Ready` event containing the current subscription ID, and -/// then with an `Initialized` event containing the latest finalized block and latest -/// runtime information, and then any new/best block events and so on received since -/// the latest finalized block. 
-#[derive(Debug)] -pub struct FollowStreamDriverSubscription { - id: usize, - done: bool, - shared: Shared, - local_items: VecDeque>>, -} - -impl Stream for FollowStreamDriverSubscription { - type Item = FollowStreamMsg>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - if self.done { - return Poll::Ready(None); - } - - loop { - if let Some(item) = self.local_items.pop_front() { - return Poll::Ready(Some(item)); - } - - let items = self.shared.take_items_and_save_waker(self.id, cx.waker()); - - // If no items left, mark locally as done (to avoid further locking) - // and return None to signal done-ness. - let Some(items) = items else { - self.done = true; - return Poll::Ready(None); - }; - - // No items? We've saved the waker so we'll be told when more come. - // Else, save the items locally and loop around to pop from them. - if items.is_empty() { - return Poll::Pending; - } else { - self.local_items = items; - } - } - } -} - -impl FollowStreamDriverSubscription { - /// Return the current subscription ID. If the subscription has stopped, then this will - /// wait until a new subscription has started with a new ID. - pub async fn subscription_id(self) -> Option { - let ready_event = self - .skip_while(|ev| std::future::ready(!matches!(ev, FollowStreamMsg::Ready(_)))) - .next() - .await?; - - match ready_event { - FollowStreamMsg::Ready(sub_id) => Some(sub_id), - _ => None, - } - } - - /// Subscribe to the follow events, ignoring any other messages. - pub fn events(self) -> impl Stream>> + Send + Sync { - self.filter_map(|ev| std::future::ready(ev.into_event())) - } -} - -impl Clone for FollowStreamDriverSubscription { - fn clone(&self) -> Self { - self.shared.subscribe() - } -} - -impl Drop for FollowStreamDriverSubscription { - fn drop(&mut self) { - self.shared.remove_sub(self.id); - } -} - -/// Locked shared state. 
The driver stream will access this state to push -/// events to any subscribers, and subscribers will access it to pull the -/// events destined for themselves. -#[derive(Debug, Clone)] -struct Shared(Arc>>); - -#[derive(Debug)] -struct SharedState { - done: bool, - next_id: usize, - subscribers: HashMap>, - /// Keep a buffer of all events that should be handed to a new subscription. - block_events_for_new_subscriptions: VecDeque>>, - // Keep track of the subscription ID we send out on new subs. - current_subscription_id: Option, - // Keep track of the init message we send out on new subs. - current_init_message: Option>>, - // Runtime events by block hash; we need to track these to know - // whether the runtime has changed when we see a finalized block notification. - seen_runtime_events: HashMap, -} - -impl Default for Shared { - fn default() -> Self { - Shared(Arc::new(Mutex::new(SharedState { - next_id: 1, - done: false, - subscribers: HashMap::new(), - current_init_message: None, - current_subscription_id: None, - seen_runtime_events: HashMap::new(), - block_events_for_new_subscriptions: VecDeque::new(), - }))) - } -} - -impl Shared { - /// Set the shared state to "done"; no more items will be handed to it. - pub fn done(&self) { - let mut shared = self.0.lock().unwrap(); - shared.done = true; - - // Wake up all subscribers so they get notified that the backend was closed - for details in shared.subscribers.values_mut() { - if let Some(waker) = details.waker.take() { - waker.wake(); - } - } - } - - /// Cleanup a subscription. - pub fn remove_sub(&self, sub_id: usize) { - let mut shared = self.0.lock().unwrap(); - shared.subscribers.remove(&sub_id); - } - - /// Take items for some subscription ID and save the waker. 
- pub fn take_items_and_save_waker( - &self, - sub_id: usize, - waker: &Waker, - ) -> Option>>> { - let mut shared = self.0.lock().unwrap(); - - let is_done = shared.done; - let details = shared.subscribers.get_mut(&sub_id)?; - - // no more items to pull, and stream closed, so return None. - if details.items.is_empty() && is_done { - return None; - } - - // else, take whatever items, and save the waker if not done yet. - let items = std::mem::take(&mut details.items); - if !is_done { - details.waker = Some(waker.clone()); - } - Some(items) - } - - /// Push a new item out to subscribers. - pub fn push_item(&self, item: FollowStreamMsg>) { - let mut shared = self.0.lock().unwrap(); - let shared = shared.deref_mut(); - - // broadcast item to subscribers: - for details in shared.subscribers.values_mut() { - details.items.push_back(item.clone()); - if let Some(waker) = details.waker.take() { - waker.wake(); - } - } - - // Keep our buffer of ready/block events up-to-date: - match item { - FollowStreamMsg::Ready(sub_id) => { - // Set new subscription ID when it comes in. - shared.current_subscription_id = Some(sub_id); - } - FollowStreamMsg::Event(FollowEvent::Initialized(ev)) => { - // New subscriptions will be given this init message: - shared.current_init_message = Some(ev.clone()); - // Clear block cache (since a new finalized block hash is seen): - shared.block_events_for_new_subscriptions.clear(); - } - FollowStreamMsg::Event(FollowEvent::Finalized(finalized_ev)) => { - // Update the init message that we'll hand out to new subscriptions. If the init message - // is `None` for some reason, we just ignore this step. - if let Some(init_message) = &mut shared.current_init_message { - // Find the latest runtime update that's been finalized. 
- let newest_runtime = finalized_ev - .finalized_block_hashes - .iter() - .rev() - .filter_map(|h| shared.seen_runtime_events.get(&h.hash()).cloned()) - .next(); - - shared.seen_runtime_events.clear(); - - init_message - .finalized_block_hashes - .clone_from(&finalized_ev.finalized_block_hashes); - - if let Some(runtime_ev) = newest_runtime { - init_message.finalized_block_runtime = Some(runtime_ev); - } - } - - // The last finalized block will be reported as Initialized by our driver, - // therefore there is no need to report NewBlock and BestBlock events for it. - // If the Finalized event reported multiple finalized hashes, we only care about - // the state at the head of the chain, therefore it is correct to remove those as well. - // Idem for the pruned hashes; they will never be reported again and we remove - // them from the window of events. - let to_remove: HashSet = finalized_ev - .finalized_block_hashes - .iter() - .chain(finalized_ev.pruned_block_hashes.iter()) - .map(|h| h.hash()) - .collect(); - - shared - .block_events_for_new_subscriptions - .retain(|ev| match ev { - FollowEvent::NewBlock(new_block_ev) => { - !to_remove.contains(&new_block_ev.block_hash.hash()) - } - FollowEvent::BestBlockChanged(best_block_ev) => { - !to_remove.contains(&best_block_ev.best_block_hash.hash()) - } - _ => true, - }); - } - FollowStreamMsg::Event(FollowEvent::NewBlock(new_block_ev)) => { - // If a new runtime is seen, note it so that when a block is finalized, we - // can associate that with a runtime update having happened. 
- if let Some(runtime_event) = &new_block_ev.new_runtime { - shared - .seen_runtime_events - .insert(new_block_ev.block_hash.hash(), runtime_event.clone()); - } - - shared - .block_events_for_new_subscriptions - .push_back(FollowEvent::NewBlock(new_block_ev)); - } - FollowStreamMsg::Event(ev @ FollowEvent::BestBlockChanged(_)) => { - shared.block_events_for_new_subscriptions.push_back(ev); - } - FollowStreamMsg::Event(FollowEvent::Stop) => { - // On a stop event, clear everything. Wait for resubscription and new ready/initialised events. - shared.block_events_for_new_subscriptions.clear(); - shared.current_subscription_id = None; - shared.current_init_message = None; - } - _ => { - // We don't buffer any other events. - } - } - } - - /// Create a new subscription. - pub fn subscribe(&self) -> FollowStreamDriverSubscription { - let mut shared = self.0.lock().unwrap(); - - let id = shared.next_id; - shared.next_id += 1; - - shared.subscribers.insert( - id, - SubscriberDetails { - items: VecDeque::new(), - waker: None, - }, - ); - - // Any new subscription should start with a "Ready" message and then an "Initialized" - // message, and then any non-finalized block events since that. If these don't exist, - // it means the subscription is currently stopped, and we should expect new Ready/Init - // messages anyway once it restarts. 
- let mut local_items = VecDeque::new(); - if let Some(sub_id) = &shared.current_subscription_id { - local_items.push_back(FollowStreamMsg::Ready(sub_id.clone())); - } - if let Some(init_msg) = &shared.current_init_message { - local_items.push_back(FollowStreamMsg::Event(FollowEvent::Initialized( - init_msg.clone(), - ))); - } - for ev in &shared.block_events_for_new_subscriptions { - local_items.push_back(FollowStreamMsg::Event(ev.clone())); - } - - drop(shared); - - FollowStreamDriverSubscription { - id, - done: false, - shared: self.clone(), - local_items, - } - } -} - -/// Details for a given subscriber: any items it's not yet claimed, -/// and a way to wake it up when there are more items for it. -#[derive(Debug)] -struct SubscriberDetails { - items: VecDeque>>, - waker: Option, -} - -/// A stream that subscribes to finalized blocks -/// and indicates whether a block was missed if was restarted. -#[derive(Debug)] -pub struct FollowStreamFinalizedHeads { - stream: FollowStreamDriverSubscription, - sub_id: Option, - last_seen_block: Option>, - f: F, - is_done: bool, -} - -impl Unpin for FollowStreamFinalizedHeads {} - -impl FollowStreamFinalizedHeads -where - H: Hash, - F: Fn(FollowEvent>) -> Vec>, -{ - pub fn new(stream: FollowStreamDriverSubscription, f: F) -> Self { - Self { - stream, - sub_id: None, - last_seen_block: None, - f, - is_done: false, - } - } -} - -impl Stream for FollowStreamFinalizedHeads -where - H: Hash, - F: Fn(FollowEvent>) -> Vec>, -{ - type Item = Result<(String, Vec>), BackendError>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - if self.is_done { - return Poll::Ready(None); - } - - loop { - let Some(ev) = futures::ready!(self.stream.poll_next_unpin(cx)) else { - self.is_done = true; - return Poll::Ready(None); - }; - - let block_refs = match ev { - FollowStreamMsg::Ready(sub_id) => { - self.sub_id = Some(sub_id); - continue; - } - FollowStreamMsg::Event(FollowEvent::Finalized(finalized)) => { - 
self.last_seen_block = finalized.finalized_block_hashes.last().cloned(); - - (self.f)(FollowEvent::Finalized(finalized)) - } - FollowStreamMsg::Event(FollowEvent::Initialized(mut init)) => { - let prev = self.last_seen_block.take(); - self.last_seen_block = init.finalized_block_hashes.last().cloned(); - - if let Some(p) = prev { - let Some(pos) = init - .finalized_block_hashes - .iter() - .position(|b| b.hash() == p.hash()) - else { - return Poll::Ready(Some(Err(RpcError::ClientError( - subxt_rpcs::Error::DisconnectedWillReconnect( - "Missed at least one block when the connection was lost" - .to_owned(), - ), - ) - .into()))); - }; - - // If we got older blocks than `prev`, we need to remove them - // because they should already have been sent at this point. - init.finalized_block_hashes.drain(0..=pos); - } - - (self.f)(FollowEvent::Initialized(init)) - } - FollowStreamMsg::Event(ev) => (self.f)(ev), - }; - - if block_refs.is_empty() { - continue; - } - - let sub_id = self - .sub_id - .clone() - .expect("Ready is always emitted before any other event"); - - return Poll::Ready(Some(Ok((sub_id, block_refs)))); - } - } -} - -#[cfg(test)] -mod test_utils { - use super::super::follow_stream_unpin::test_utils::test_unpin_stream_getter; - use super::*; - - /// Return a `FollowStreamDriver` - pub fn test_follow_stream_driver_getter( - events: F, - max_life: usize, - ) -> FollowStreamDriver - where - H: Hash + 'static, - F: Fn() -> I + Send + 'static, - I: IntoIterator, BackendError>>, - { - let (stream, _) = test_unpin_stream_getter(events, max_life); - FollowStreamDriver::new(stream) - } -} - -#[cfg(test)] -mod test { - use futures::TryStreamExt; - use primitive_types::H256; - - use super::super::follow_stream::test_utils::{ - ev_best_block, ev_finalized, ev_initialized, ev_new_block, - }; - use super::super::follow_stream_unpin::test_utils::{ - ev_best_block_ref, ev_finalized_ref, ev_initialized_ref, ev_new_block_ref, - }; - use 
super::test_utils::test_follow_stream_driver_getter; - use super::*; - - #[test] - fn follow_stream_driver_is_sendable() { - fn assert_send(_: T) {} - let stream_getter = test_follow_stream_driver_getter(|| [Ok(ev_initialized(1))], 10); - assert_send(stream_getter); - } - - #[tokio::test] - async fn subscribers_all_receive_events_and_finish_gracefully_on_error() { - let mut driver = test_follow_stream_driver_getter( - || { - [ - Ok(ev_initialized(0)), - Ok(ev_new_block(0, 1)), - Ok(ev_best_block(1)), - Ok(ev_finalized([1], [])), - Err(BackendError::other("ended")), - ] - }, - 10, - ); - - let handle = driver.handle(); - - let a = handle.subscribe(); - let b = handle.subscribe(); - let c = handle.subscribe(); - - // Drive to completion (the sort of real life usage I'd expect): - tokio::spawn(async move { while driver.next().await.is_some() {} }); - - let a_vec: Vec<_> = a.collect().await; - let b_vec: Vec<_> = b.collect().await; - let c_vec: Vec<_> = c.collect().await; - - let expected = vec![ - FollowStreamMsg::Ready("sub_id_0".into()), - FollowStreamMsg::Event(ev_initialized_ref(0)), - FollowStreamMsg::Event(ev_new_block_ref(0, 1)), - FollowStreamMsg::Event(ev_best_block_ref(1)), - FollowStreamMsg::Event(ev_finalized_ref([1])), - ]; - - assert_eq!(a_vec, expected); - assert_eq!(b_vec, expected); - assert_eq!(c_vec, expected); - } - - #[tokio::test] - async fn subscribers_receive_block_events_from_last_finalised() { - let mut driver = test_follow_stream_driver_getter( - || { - [ - Ok(ev_initialized(0)), - Ok(ev_new_block(0, 1)), - Ok(ev_best_block(1)), - Ok(ev_finalized([1], [])), - Ok(ev_new_block(1, 2)), - Ok(ev_new_block(2, 3)), - Err(BackendError::other("ended")), - ] - }, - 10, - ); - - // Skip past ready, init, new, best events. 
- let _r = driver.next().await.unwrap(); - let _i0 = driver.next().await.unwrap(); - let _n1 = driver.next().await.unwrap(); - let _b1 = driver.next().await.unwrap(); - - // THEN subscribe; subscription should still receive them: - let evs: Vec<_> = driver.handle().subscribe().take(4).collect().await; - let expected = vec![ - FollowStreamMsg::Ready("sub_id_0".into()), - FollowStreamMsg::Event(ev_initialized_ref(0)), - FollowStreamMsg::Event(ev_new_block_ref(0, 1)), - FollowStreamMsg::Event(ev_best_block_ref(1)), - ]; - assert_eq!(evs, expected); - - // Skip past finalized 1, new 2, new 3 events - let _f1 = driver.next().await.unwrap(); - let _n2 = driver.next().await.unwrap(); - let _n3 = driver.next().await.unwrap(); - - // THEN subscribe again; new subs will see an updated initialized message - // with the latest finalized block hash. - let evs: Vec<_> = driver.handle().subscribe().take(4).collect().await; - let expected = vec![ - FollowStreamMsg::Ready("sub_id_0".into()), - FollowStreamMsg::Event(ev_initialized_ref(1)), - FollowStreamMsg::Event(ev_new_block_ref(1, 2)), - FollowStreamMsg::Event(ev_new_block_ref(2, 3)), - ]; - assert_eq!(evs, expected); - } - - #[tokio::test] - async fn subscribers_receive_new_blocks_before_subscribing() { - let mut driver = test_follow_stream_driver_getter( - || { - [ - Ok(ev_initialized(0)), - Ok(ev_new_block(0, 1)), - Ok(ev_best_block(1)), - Ok(ev_new_block(1, 2)), - Ok(ev_new_block(2, 3)), - Ok(ev_finalized([1], [])), - Err(BackendError::other("ended")), - ] - }, - 10, - ); - - // Skip to the first finalized block F1. - let _r = driver.next().await.unwrap(); - let _i0 = driver.next().await.unwrap(); - let _n1 = driver.next().await.unwrap(); - let _b1 = driver.next().await.unwrap(); - let _n2 = driver.next().await.unwrap(); - let _n3 = driver.next().await.unwrap(); - let _f1 = driver.next().await.unwrap(); - - // THEN subscribe; and make sure new block 1 and 2 are received. 
- let evs: Vec<_> = driver.handle().subscribe().take(4).collect().await; - let expected = vec![ - FollowStreamMsg::Ready("sub_id_0".into()), - FollowStreamMsg::Event(ev_initialized_ref(1)), - FollowStreamMsg::Event(ev_new_block_ref(1, 2)), - FollowStreamMsg::Event(ev_new_block_ref(2, 3)), - ]; - assert_eq!(evs, expected); - } - - #[tokio::test] - async fn subscribe_finalized_blocks_restart_works() { - let mut driver = test_follow_stream_driver_getter( - || { - [ - Ok(ev_initialized(0)), - Ok(ev_new_block(0, 1)), - Ok(ev_best_block(1)), - Ok(ev_finalized([1], [])), - Ok(FollowEvent::Stop), - Ok(ev_initialized(1)), - Ok(ev_finalized([2], [])), - Err(BackendError::other("ended")), - ] - }, - 10, - ); - - let handle = driver.handle(); - - tokio::spawn(async move { while driver.next().await.is_some() {} }); - - let f = |ev| match ev { - FollowEvent::Finalized(ev) => ev.finalized_block_hashes, - FollowEvent::Initialized(ev) => ev.finalized_block_hashes, - _ => vec![], - }; - - let stream = FollowStreamFinalizedHeads::new(handle.subscribe(), f); - let evs: Vec<_> = stream.try_collect().await.unwrap(); - - let expected = vec![ - ( - "sub_id_0".to_string(), - vec![BlockRef::new(H256::from_low_u64_le(0))], - ), - ( - "sub_id_0".to_string(), - vec![BlockRef::new(H256::from_low_u64_le(1))], - ), - ( - "sub_id_5".to_string(), - vec![BlockRef::new(H256::from_low_u64_le(2))], - ), - ]; - assert_eq!(evs, expected); - } - - #[tokio::test] - async fn subscribe_finalized_blocks_restart_with_missed_blocks() { - let mut driver = test_follow_stream_driver_getter( - || { - [ - Ok(ev_initialized(0)), - Ok(FollowEvent::Stop), - // Emulate that we missed some blocks. 
- Ok(ev_initialized(13)), - Ok(ev_finalized([14], [])), - Err(BackendError::other("ended")), - ] - }, - 10, - ); - - let handle = driver.handle(); - - tokio::spawn(async move { while driver.next().await.is_some() {} }); - - let f = |ev| match ev { - FollowEvent::Finalized(ev) => ev.finalized_block_hashes, - FollowEvent::Initialized(ev) => ev.finalized_block_hashes, - _ => vec![], - }; - - let evs: Vec<_> = FollowStreamFinalizedHeads::new(handle.subscribe(), f) - .collect() - .await; - - assert_eq!( - evs[0].as_ref().unwrap(), - &( - "sub_id_0".to_string(), - vec![BlockRef::new(H256::from_low_u64_le(0))] - ) - ); - assert!( - matches!(&evs[1], Err(BackendError::Rpc(RpcError::ClientError(subxt_rpcs::Error::DisconnectedWillReconnect(e)))) if e.contains("Missed at least one block when the connection was lost")) - ); - assert_eq!( - evs[2].as_ref().unwrap(), - &( - "sub_id_2".to_string(), - vec![BlockRef::new(H256::from_low_u64_le(14))] - ) - ); - } -} diff --git a/new/src/backend/chain_head/follow_stream_unpin.rs b/new/src/backend/chain_head/follow_stream_unpin.rs deleted file mode 100644 index b8e9c144f8..0000000000 --- a/new/src/backend/chain_head/follow_stream_unpin.rs +++ /dev/null @@ -1,813 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use super::ChainHeadRpcMethods; -use super::follow_stream::FollowStream; -use crate::config::{Config, Hash, HashFor, RpcConfigFor}; -use crate::error::BackendError; -use futures::stream::{FuturesUnordered, Stream, StreamExt}; -use subxt_rpcs::methods::chain_head::{ - BestBlockChanged, Finalized, FollowEvent, Initialized, NewBlock, -}; - -use std::collections::{HashMap, HashSet}; -use std::future::Future; -use std::pin::Pin; -use std::sync::{Arc, Mutex}; -use std::task::{Context, Poll, Waker}; - -/// The type of stream item. 
-pub use super::follow_stream::FollowStreamMsg; - -/// A `Stream` which builds on `FollowStream`, and handles pinning. It replaces any block hash seen in -/// the follow events with a `BlockRef` which, when all clones are dropped, will lead to an "unpin" call -/// for that block hash being queued. It will also automatically unpin any blocks that exceed a given max -/// age, to try and prevent the underlying stream from ending (and _all_ blocks from being unpinned as a -/// result). Put simply, it tries to keep every block pinned as long as possible until the block is no longer -/// used anywhere. -#[derive(Debug)] -pub struct FollowStreamUnpin { - // The underlying stream of events. - inner: FollowStream, - // A method to call to unpin a block, given a block hash and a subscription ID. - unpin_method: UnpinMethodHolder, - // Futures for sending unpin events that we'll poll to completion as - // part of polling the stream as a whole. - unpin_futs: FuturesUnordered, - // Each time a new finalized block is seen, we give it an age of `next_rel_block_age`, - // and then increment this ready for the next finalized block. So, the first finalized - // block will have an age of 0, the next 1, 2, 3 and so on. We can then use `max_block_life` - // to say "unpin all blocks with an age < (next_rel_block_age-1) - max_block_life". - next_rel_block_age: usize, - // The latest ID of the FollowStream subscription, which we can use - // to unpin blocks. - subscription_id: Option>, - // The longest period a block can be pinned for. - max_block_life: usize, - // The currently seen and pinned blocks. - pinned: HashMap>, - // Shared state about blocks we've flagged to unpin from elsewhere - unpin_flags: UnpinFlags, -} - -// Just a wrapper to make implementing debug on the whole thing easier. 
-struct UnpinMethodHolder(UnpinMethod); -impl std::fmt::Debug for UnpinMethodHolder { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "UnpinMethodHolder(Box) -> UnpinFut>)" - ) - } -} - -/// The type of the unpin method that we need to provide. -pub type UnpinMethod = Box) -> UnpinFut + Send>; - -/// The future returned from [`UnpinMethod`]. -pub type UnpinFut = Pin + Send + 'static>>; - -impl std::marker::Unpin for FollowStreamUnpin {} - -impl Stream for FollowStreamUnpin { - type Item = Result>, BackendError>; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.as_mut(); - - loop { - // Poll any queued unpin tasks. - let unpin_futs_are_pending = match this.unpin_futs.poll_next_unpin(cx) { - Poll::Ready(Some(())) => continue, - Poll::Ready(None) => false, - Poll::Pending => true, - }; - - // Poll the inner stream for the next event. - let Poll::Ready(ev) = this.inner.poll_next_unpin(cx) else { - return Poll::Pending; - }; - - let Some(ev) = ev else { - // if the stream is done, but `unpin_futs` are still pending, then - // return pending here so that they are still driven to completion. - // Else, return `Ready(None)` to signal nothing left to do. - return match unpin_futs_are_pending { - true => Poll::Pending, - false => Poll::Ready(None), - }; - }; - - // Error? just return it and do nothing further. - let ev = match ev { - Ok(ev) => ev, - Err(e) => { - return Poll::Ready(Some(Err(e))); - } - }; - - // React to any actual FollowEvent we get back. - let ev = match ev { - FollowStreamMsg::Ready(subscription_id) => { - // update the subscription ID we'll use to unpin things. - this.subscription_id = Some(subscription_id.clone().into()); - - FollowStreamMsg::Ready(subscription_id) - } - FollowStreamMsg::Event(FollowEvent::Initialized(details)) => { - let mut finalized_block_hashes = - Vec::with_capacity(details.finalized_block_hashes.len()); - - // Pin each of the finalized blocks. 
None of them will show up again (except as a - // parent block), and so they can all be unpinned immediately at any time. Increment - // the block age for each one, so that older finalized blocks are pruned first. - for finalized_block in &details.finalized_block_hashes { - let rel_block_age = this.next_rel_block_age; - let block_ref = - this.pin_unpinnable_block_at(rel_block_age, *finalized_block); - - finalized_block_hashes.push(block_ref); - this.next_rel_block_age += 1; - } - - FollowStreamMsg::Event(FollowEvent::Initialized(Initialized { - finalized_block_hashes, - finalized_block_runtime: details.finalized_block_runtime, - })) - } - FollowStreamMsg::Event(FollowEvent::NewBlock(details)) => { - // One bigger than our parent, and if no parent seen (maybe it was - // unpinned already), then one bigger than the last finalized block num - // as a best guess. - let parent_rel_block_age = this - .pinned - .get(&details.parent_block_hash) - .map(|p| p.rel_block_age) - .unwrap_or(this.next_rel_block_age.saturating_sub(1)); - - let block_ref = this.pin_block_at(parent_rel_block_age + 1, details.block_hash); - let parent_block_ref = - this.pin_block_at(parent_rel_block_age, details.parent_block_hash); - - FollowStreamMsg::Event(FollowEvent::NewBlock(NewBlock { - block_hash: block_ref, - parent_block_hash: parent_block_ref, - new_runtime: details.new_runtime, - })) - } - FollowStreamMsg::Event(FollowEvent::BestBlockChanged(details)) => { - // We expect this block to already exist, so it'll keep its existing block_num, - // but worst case it'll just get the current finalized block_num + 1. 
- let rel_block_age = this.next_rel_block_age; - let block_ref = this.pin_block_at(rel_block_age, details.best_block_hash); - - FollowStreamMsg::Event(FollowEvent::BestBlockChanged(BestBlockChanged { - best_block_hash: block_ref, - })) - } - FollowStreamMsg::Event(FollowEvent::Finalized(details)) => { - let finalized_block_refs: Vec<_> = details - .finalized_block_hashes - .into_iter() - .enumerate() - .map(|(idx, hash)| { - // These blocks _should_ exist already and so will have a known block num, - // but if they don't, we just increment the num from the last finalized block - // we saw, which should be accurate. - // - // `pin_unpinnable_block_at` indicates that the block will not show up in future events - // (They will show up as a parent block, but we don't care about that right now). - let rel_block_age = this.next_rel_block_age + idx; - this.pin_unpinnable_block_at(rel_block_age, hash) - }) - .collect(); - - // Our relative block height is increased by however many finalized - // blocks we've seen. - this.next_rel_block_age += finalized_block_refs.len(); - - let pruned_block_refs: Vec<_> = details - .pruned_block_hashes - .into_iter() - .map(|hash| { - // We should know about these, too, and if not we set their age to last_finalized + 1. - // - // `pin_unpinnable_block_at` indicates that the block will not show up in future events. - let rel_block_age = this.next_rel_block_age; - this.pin_unpinnable_block_at(rel_block_age, hash) - }) - .collect(); - - // At this point, we also check to see which blocks we should submit unpin events - // for. We will unpin: - // - Any block that's older than the max age. - // - Any block that has no references left (ie has been dropped) that _also_ has - // showed up in the pruned list in a finalized event (so it will never be in another event). 
- this.unpin_blocks(cx.waker()); - - FollowStreamMsg::Event(FollowEvent::Finalized(Finalized { - finalized_block_hashes: finalized_block_refs, - pruned_block_hashes: pruned_block_refs, - })) - } - FollowStreamMsg::Event(FollowEvent::Stop) => { - // clear out "old" things that are no longer applicable since - // the subscription has ended (a new one will be created under the hood, at - // which point we'll get given a new subscription ID. - this.subscription_id = None; - this.pinned.clear(); - this.unpin_futs.clear(); - this.unpin_flags.lock().unwrap().clear(); - this.next_rel_block_age = 0; - - FollowStreamMsg::Event(FollowEvent::Stop) - } - // These events aren't interesting; we just forward them on: - FollowStreamMsg::Event(FollowEvent::OperationBodyDone(details)) => { - FollowStreamMsg::Event(FollowEvent::OperationBodyDone(details)) - } - FollowStreamMsg::Event(FollowEvent::OperationCallDone(details)) => { - FollowStreamMsg::Event(FollowEvent::OperationCallDone(details)) - } - FollowStreamMsg::Event(FollowEvent::OperationStorageItems(details)) => { - FollowStreamMsg::Event(FollowEvent::OperationStorageItems(details)) - } - FollowStreamMsg::Event(FollowEvent::OperationWaitingForContinue(details)) => { - FollowStreamMsg::Event(FollowEvent::OperationWaitingForContinue(details)) - } - FollowStreamMsg::Event(FollowEvent::OperationStorageDone(details)) => { - FollowStreamMsg::Event(FollowEvent::OperationStorageDone(details)) - } - FollowStreamMsg::Event(FollowEvent::OperationInaccessible(details)) => { - FollowStreamMsg::Event(FollowEvent::OperationInaccessible(details)) - } - FollowStreamMsg::Event(FollowEvent::OperationError(details)) => { - FollowStreamMsg::Event(FollowEvent::OperationError(details)) - } - }; - - // Return our event. - return Poll::Ready(Some(Ok(ev))); - } - } -} - -impl FollowStreamUnpin { - /// Create a new [`FollowStreamUnpin`]. 
- pub fn new( - follow_stream: FollowStream, - unpin_method: UnpinMethod, - max_block_life: usize, - ) -> Self { - Self { - inner: follow_stream, - unpin_method: UnpinMethodHolder(unpin_method), - max_block_life, - pinned: Default::default(), - subscription_id: None, - next_rel_block_age: 0, - unpin_flags: Default::default(), - unpin_futs: Default::default(), - } - } - - /// Create a new [`FollowStreamUnpin`] given the RPC methods. - pub fn from_methods( - follow_stream: FollowStream>, - methods: ChainHeadRpcMethods>, - max_block_life: usize, - ) -> FollowStreamUnpin> { - let unpin_method = Box::new(move |hash: HashFor, sub_id: Arc| { - let methods = methods.clone(); - let fut: UnpinFut = Box::pin(async move { - // We ignore any errors trying to unpin at the moment. - let _ = methods.chainhead_v1_unpin(&sub_id, hash).await; - }); - fut - }); - - FollowStreamUnpin::new(follow_stream, unpin_method, max_block_life) - } - - /// Is the block hash currently pinned. - pub fn is_pinned(&self, hash: &H) -> bool { - self.pinned.contains_key(hash) - } - - /// Pin a block, or return the reference to an already-pinned block. If the block has been registered to - /// be unpinned, we'll clear those flags, so that it won't be unpinned. If the unpin request has already - /// been sent though, then the block will be unpinned. - fn pin_block_at(&mut self, rel_block_age: usize, hash: H) -> BlockRef { - self.pin_block_at_setting_unpinnable_flag(rel_block_age, hash, false) - } - - /// Pin a block, or return the reference to an already-pinned block. - /// - /// This is the same as [`Self::pin_block_at`], except that it also marks the block as being unpinnable now, - /// which should be done for any block that will no longer be seen in future events. 
- fn pin_unpinnable_block_at(&mut self, rel_block_age: usize, hash: H) -> BlockRef { - self.pin_block_at_setting_unpinnable_flag(rel_block_age, hash, true) - } - - fn pin_block_at_setting_unpinnable_flag( - &mut self, - rel_block_age: usize, - hash: H, - can_be_unpinned: bool, - ) -> BlockRef { - let entry = self - .pinned - .entry(hash) - // If there's already an entry, then clear any unpin_flags and update the - // can_be_unpinned status (this can become true but cannot become false again - // once true). - .and_modify(|entry| { - entry.can_be_unpinned = entry.can_be_unpinned || can_be_unpinned; - self.unpin_flags.lock().unwrap().remove(&hash); - }) - // If there's not an entry already, make one and return it. - .or_insert_with(|| PinnedDetails { - rel_block_age, - block_ref: BlockRef { - inner: Arc::new(BlockRefInner { - hash, - unpin_flags: self.unpin_flags.clone(), - }), - }, - can_be_unpinned, - }); - - entry.block_ref.clone() - } - - /// Unpin any blocks that are either too old, or have the unpin flag set and are old enough. - fn unpin_blocks(&mut self, waker: &Waker) { - let mut unpin_flags = self.unpin_flags.lock().unwrap(); - - // This gets the age of the last finalized block. - let rel_block_age = self.next_rel_block_age.saturating_sub(1); - - // If we asked to unpin and there was no subscription_id, then there's nothing we can do, - // and nothing will need unpinning now anyway. - let Some(sub_id) = &self.subscription_id else { - return; - }; - - let mut blocks_to_unpin = vec![]; - for (hash, details) in &self.pinned { - if rel_block_age.saturating_sub(details.rel_block_age) >= self.max_block_life - || (unpin_flags.contains(hash) && details.can_be_unpinned) - { - // The block is too old, or it's been flagged to be unpinned and won't be in a future - // backend event, so we can unpin it for real now. - blocks_to_unpin.push(*hash); - // Clear it from our unpin flags if present so that we don't try to unpin it again. 
- unpin_flags.remove(hash); - } - } - - // Release our lock on unpin_flags ASAP. - drop(unpin_flags); - - // No need to call the waker etc if nothing to do: - if blocks_to_unpin.is_empty() { - return; - } - - for hash in blocks_to_unpin { - self.pinned.remove(&hash); - let fut = (self.unpin_method.0)(hash, sub_id.clone()); - self.unpin_futs.push(fut); - } - - // Any new futures pushed above need polling to start. We could - // just wait for the next stream event, but let's wake the task to - // have it polled sooner, just in case it's slow to receive things. - waker.wake_by_ref(); - } -} - -// The set of block hashes that can be unpinned when ready. -// BlockRefs write to this when they are dropped. -type UnpinFlags = Arc>>; - -#[derive(Debug)] -struct PinnedDetails { - /// Relatively speaking, how old is the block? When we start following - /// blocks, the first finalized block gets an age of 0, the second an age - /// of 1 and so on. - rel_block_age: usize, - /// A block ref we can hand out to keep blocks pinned. - /// Because we store one here until it's unpinned, the live count - /// will only drop to 1 when no external refs are left. - block_ref: BlockRef, - /// Has this block showed up in the list of pruned blocks, or has it - /// been finalized? In this case, it can now been pinned as it won't - /// show up again in future events (except as a "parent block" of some - /// new block, which we're currently ignoring). - can_be_unpinned: bool, -} - -/// All blocks reported will be wrapped in this. -#[derive(Debug, Clone)] -pub struct BlockRef { - inner: Arc>, -} - -#[derive(Debug)] -struct BlockRefInner { - hash: H, - unpin_flags: UnpinFlags, -} - -impl BlockRef { - /// For testing purposes only, create a BlockRef from a hash - /// that isn't pinned. - #[cfg(test)] - pub fn new(hash: H) -> Self { - BlockRef { - inner: Arc::new(BlockRefInner { - hash, - unpin_flags: Default::default(), - }), - } - } - - /// Return the hash for this block. 
- pub fn hash(&self) -> H { - self.inner.hash - } -} - -impl PartialEq for BlockRef { - fn eq(&self, other: &Self) -> bool { - self.inner.hash == other.inner.hash - } -} - -impl PartialEq for BlockRef { - fn eq(&self, other: &H) -> bool { - &self.inner.hash == other - } -} - -impl Drop for BlockRef { - fn drop(&mut self) { - // PinnedDetails keeps one ref, so if this is the second ref, it's the - // only "external" one left and we should ask to unpin it now. if it's - // the only ref remaining, it means that it's already been unpinned, so - // nothing to do here anyway. - if Arc::strong_count(&self.inner) == 2 { - if let Ok(mut unpin_flags) = self.inner.unpin_flags.lock() { - unpin_flags.insert(self.inner.hash); - } - } - } -} - -#[cfg(test)] -pub(super) mod test_utils { - use super::super::follow_stream::{FollowStream, test_utils::test_stream_getter}; - use super::*; - use crate::config::substrate::H256; - - pub type UnpinRx = std::sync::mpsc::Receiver<(H, Arc)>; - - /// Get a [`FollowStreamUnpin`] from an iterator over events. - pub fn test_unpin_stream_getter( - events: F, - max_life: usize, - ) -> (FollowStreamUnpin, UnpinRx) - where - H: Hash + 'static, - F: Fn() -> I + Send + 'static, - I: IntoIterator, BackendError>>, - { - // Unpin requests will come here so that we can look out for them. - let (unpin_tx, unpin_rx) = std::sync::mpsc::channel(); - - let follow_stream = FollowStream::new(test_stream_getter(events)); - let unpin_method: UnpinMethod = Box::new(move |hash, sub_id| { - unpin_tx.send((hash, sub_id)).unwrap(); - Box::pin(std::future::ready(())) - }); - - let follow_unpin = FollowStreamUnpin::new(follow_stream, unpin_method, max_life); - (follow_unpin, unpin_rx) - } - - /// Assert that the unpinned blocks sent from the `UnpinRx` channel match the items given. 
- pub fn assert_from_unpin_rx( - unpin_rx: &UnpinRx, - items: impl IntoIterator, - ) { - let expected_hashes = HashSet::::from_iter(items); - for i in 0..expected_hashes.len() { - let Ok((hash, _)) = unpin_rx.try_recv() else { - panic!("Another unpin event is expected, but failed to pull item {i} from channel"); - }; - assert!( - expected_hashes.contains(&hash), - "Hash {hash:?} was unpinned, but is not expected to have been" - ); - } - } - - /// An initialized event containing a BlockRef (useful for comparisons) - pub fn ev_initialized_ref(n: u64) -> FollowEvent> { - FollowEvent::Initialized(Initialized { - finalized_block_hashes: vec![BlockRef::new(H256::from_low_u64_le(n))], - finalized_block_runtime: None, - }) - } - - /// A new block event containing a BlockRef (useful for comparisons) - pub fn ev_new_block_ref(parent: u64, n: u64) -> FollowEvent> { - FollowEvent::NewBlock(NewBlock { - parent_block_hash: BlockRef::new(H256::from_low_u64_le(parent)), - block_hash: BlockRef::new(H256::from_low_u64_le(n)), - new_runtime: None, - }) - } - - /// A best block event containing a BlockRef (useful for comparisons) - pub fn ev_best_block_ref(n: u64) -> FollowEvent> { - FollowEvent::BestBlockChanged(BestBlockChanged { - best_block_hash: BlockRef::new(H256::from_low_u64_le(n)), - }) - } - - /// A finalized event containing a BlockRef (useful for comparisons) - pub fn ev_finalized_ref(ns: impl IntoIterator) -> FollowEvent> { - FollowEvent::Finalized(Finalized { - finalized_block_hashes: ns - .into_iter() - .map(|h| BlockRef::new(H256::from_low_u64_le(h))) - .collect(), - pruned_block_hashes: vec![], - }) - } -} - -#[cfg(test)] -mod test { - use super::super::follow_stream::test_utils::{ - ev_best_block, ev_finalized, ev_initialized, ev_new_block, - }; - use super::test_utils::{assert_from_unpin_rx, ev_new_block_ref, test_unpin_stream_getter}; - use super::*; - use crate::config::substrate::H256; - - #[tokio::test] - async fn hands_back_blocks() { - let (follow_unpin, _) = 
test_unpin_stream_getter( - || { - [ - Ok(ev_new_block(0, 1)), - Ok(ev_new_block(1, 2)), - Ok(ev_new_block(2, 3)), - Err(BackendError::other("ended")), - ] - }, - 10, - ); - - let out: Vec<_> = follow_unpin.filter_map(async |e| e.ok()).collect().await; - - assert_eq!( - out, - vec![ - FollowStreamMsg::Ready("sub_id_0".into()), - FollowStreamMsg::Event(ev_new_block_ref(0, 1)), - FollowStreamMsg::Event(ev_new_block_ref(1, 2)), - FollowStreamMsg::Event(ev_new_block_ref(2, 3)), - ] - ); - } - - #[tokio::test] - async fn unpins_initialized_block() { - let (mut follow_unpin, unpin_rx) = test_unpin_stream_getter( - || { - [ - Ok(ev_initialized(0)), - Ok(ev_finalized([1], [])), - Err(BackendError::other("ended")), - ] - }, - 3, - ); - - let _r = follow_unpin.next().await.unwrap().unwrap(); - - // Drop the initialized block: - let i0 = follow_unpin.next().await.unwrap().unwrap(); - drop(i0); - - // Let a finalization event occur. - let _f1 = follow_unpin.next().await.unwrap().unwrap(); - - // Now, initialized block should be unpinned. 
- assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(0)]); - assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(0))); - } - - #[tokio::test] - async fn unpins_old_blocks() { - let (mut follow_unpin, unpin_rx) = test_unpin_stream_getter( - || { - [ - Ok(ev_initialized(0)), - Ok(ev_finalized([1], [])), - Ok(ev_finalized([2], [])), - Ok(ev_finalized([3], [])), - Ok(ev_finalized([4], [])), - Ok(ev_finalized([5], [])), - Err(BackendError::other("ended")), - ] - }, - 3, - ); - - let _r = follow_unpin.next().await.unwrap().unwrap(); - let _i0 = follow_unpin.next().await.unwrap().unwrap(); - unpin_rx.try_recv().expect_err("nothing unpinned yet"); - let _f1 = follow_unpin.next().await.unwrap().unwrap(); - unpin_rx.try_recv().expect_err("nothing unpinned yet"); - let _f2 = follow_unpin.next().await.unwrap().unwrap(); - unpin_rx.try_recv().expect_err("nothing unpinned yet"); - let _f3 = follow_unpin.next().await.unwrap().unwrap(); - - // Max age is 3, so after block 3 finalized, block 0 becomes too old and is unpinned. - assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(0)]); - - let _f4 = follow_unpin.next().await.unwrap().unwrap(); - - // Block 1 is now too old and is unpinned. - assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(1)]); - - let _f5 = follow_unpin.next().await.unwrap().unwrap(); - - // Block 2 is now too old and is unpinned. 
- assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(2)]); - } - - #[tokio::test] - async fn dropped_new_blocks_should_not_get_unpinned_until_finalization() { - let (mut follow_unpin, unpin_rx) = test_unpin_stream_getter( - || { - [ - Ok(ev_initialized(0)), - Ok(ev_new_block(0, 1)), - Ok(ev_new_block(1, 2)), - Ok(ev_finalized([1], [])), - Ok(ev_finalized([2], [])), - Err(BackendError::other("ended")), - ] - }, - 10, - ); - - let _r = follow_unpin.next().await.unwrap().unwrap(); - let _i0 = follow_unpin.next().await.unwrap().unwrap(); - - let n1 = follow_unpin.next().await.unwrap().unwrap(); - drop(n1); - let n2 = follow_unpin.next().await.unwrap().unwrap(); - drop(n2); - - // New blocks dropped but still pinned: - assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(1))); - assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(2))); - - let f1 = follow_unpin.next().await.unwrap().unwrap(); - drop(f1); - - // After block 1 finalized, both blocks are still pinned because: - // - block 1 was handed back in the finalized event, so will be unpinned next time. - // - block 2 wasn't mentioned in the finalized event, so should not have been unpinned yet. - assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(1))); - assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(2))); - - let f2 = follow_unpin.next().await.unwrap().unwrap(); - drop(f2); - - // After block 2 finalized, block 1 can be unpinned finally, but block 2 needs to wait one more event. 
- assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(1))); - assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(2))); - assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(1)]); - } - - #[tokio::test] - async fn dropped_new_blocks_should_not_get_unpinned_until_pruned() { - let (mut follow_unpin, unpin_rx) = test_unpin_stream_getter( - || { - [ - Ok(ev_initialized(0)), - Ok(ev_new_block(0, 1)), - Ok(ev_new_block(1, 2)), - Ok(ev_new_block(1, 3)), - Ok(ev_finalized([1], [])), - Ok(ev_finalized([2], [3])), - Ok(ev_finalized([4], [])), - Err(BackendError::other("ended")), - ] - }, - 10, - ); - - let _r = follow_unpin.next().await.unwrap().unwrap(); - let _i0 = follow_unpin.next().await.unwrap().unwrap(); - - let n1 = follow_unpin.next().await.unwrap().unwrap(); - drop(n1); - let n2 = follow_unpin.next().await.unwrap().unwrap(); - drop(n2); - let n3 = follow_unpin.next().await.unwrap().unwrap(); - drop(n3); - - let f1 = follow_unpin.next().await.unwrap().unwrap(); - drop(f1); - - // After block 1 is finalized, everything is still pinned because the finalization event - // itself returns 1, and 2/3 aren't finalized or pruned yet. - assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(1))); - assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(2))); - assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(3))); - - let f2 = follow_unpin.next().await.unwrap().unwrap(); - drop(f2); - - // After the next finalization event, block 1 can finally be unpinned since it was Finalized - // last event _and_ is no longer handed back anywhere. 2 and 3 should still be pinned. 
- assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(1))); - assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(2))); - assert!(follow_unpin.is_pinned(&H256::from_low_u64_le(3))); - assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(1)]); - - let f4 = follow_unpin.next().await.unwrap().unwrap(); - drop(f4); - - // After some other finalized event, we are now allowed to ditch the previously pruned and - // finalized blocks 2 and 3. - assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(2))); - assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(3))); - assert_from_unpin_rx( - &unpin_rx, - [H256::from_low_u64_le(2), H256::from_low_u64_le(3)], - ); - } - - #[tokio::test] - async fn never_unpin_new_block_before_finalized() { - // Ensure that if we drop a new block; the pinning is still active until the block is finalized. - let (mut follow_unpin, unpin_rx) = test_unpin_stream_getter( - || { - [ - Ok(ev_initialized(0)), - Ok(ev_new_block(0, 1)), - Ok(ev_new_block(1, 2)), - Ok(ev_best_block(1)), - Ok(ev_finalized([1], [])), - Ok(ev_finalized([2], [])), - Err(BackendError::other("ended")), - ] - }, - 10, - ); - - let _r = follow_unpin.next().await.unwrap().unwrap(); - - // drop initialised block 0 and new block 1 and new block 2. - let i0 = follow_unpin.next().await.unwrap().unwrap(); - drop(i0); - let n1 = follow_unpin.next().await.unwrap().unwrap(); - drop(n1); - let n2 = follow_unpin.next().await.unwrap().unwrap(); - drop(n2); - let b1 = follow_unpin.next().await.unwrap().unwrap(); - drop(b1); - - // Nothing unpinned yet! - unpin_rx.try_recv().expect_err("nothing unpinned yet"); - - let f1 = follow_unpin.next().await.unwrap().unwrap(); - drop(f1); - - // After finalization, block 1 is now ready to be unpinned (it won't be seen again), - // but isn't actually unpinned yet (because it was just handed back in f1). Block 0 - // however has now been unpinned. 
- assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(0))); - assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(0)]); - unpin_rx.try_recv().expect_err("nothing unpinned yet"); - - let f2 = follow_unpin.next().await.unwrap().unwrap(); - drop(f2); - - // After f2, we can get rid of block 1 now, which was finalized last time. - assert!(!follow_unpin.is_pinned(&H256::from_low_u64_le(1))); - assert_from_unpin_rx(&unpin_rx, [H256::from_low_u64_le(1)]); - unpin_rx.try_recv().expect_err("nothing unpinned yet"); - } -} diff --git a/new/src/backend/chain_head/storage_items.rs b/new/src/backend/chain_head/storage_items.rs deleted file mode 100644 index 31cbea8c09..0000000000 --- a/new/src/backend/chain_head/storage_items.rs +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use super::follow_stream_driver::FollowStreamDriverHandle; -use super::follow_stream_unpin::BlockRef; -use crate::config::{Config, HashFor, RpcConfigFor}; -use crate::error::{BackendError, RpcError}; -use futures::{FutureExt, Stream, StreamExt}; -use std::collections::VecDeque; -use std::future::Future; -use std::pin::Pin; -use std::sync::Arc; -use std::task::{Context, Poll}; -use subxt_rpcs::methods::chain_head::{ - ChainHeadRpcMethods, FollowEvent, MethodResponse, StorageQuery, StorageResult, -}; - -/// Obtain a stream of storage items given some query. this handles continuing -/// and stopping under the hood, and returns a stream of `StorageResult`s. -pub struct StorageItems { - done: bool, - operation_id: Arc, - buffered_responses: VecDeque, - continue_call: ContinueFutGetter, - continue_fut: Option, - follow_event_stream: FollowEventStream>, -} - -impl StorageItems { - // Subscribe to follow events, and return a stream of storage results - // given some storage queries. The stream will automatically resume as - // needed, and stop when done. 
- pub async fn from_methods( - queries: impl Iterator>, - at: HashFor, - follow_handle: &FollowStreamDriverHandle>, - methods: ChainHeadRpcMethods>, - ) -> Result { - let sub_id = super::get_subscription_id(follow_handle).await?; - - // Subscribe to events and make the initial request to get an operation ID. - let follow_events = follow_handle.subscribe().events(); - let status = methods - .chainhead_v1_storage(&sub_id, at, queries, None) - .await?; - let operation_id: Arc = match status { - MethodResponse::LimitReached => return Err(RpcError::LimitReached.into()), - MethodResponse::Started(s) => s.operation_id.into(), - }; - - // A function which returns the call to continue the subscription: - let continue_call: ContinueFutGetter = { - let operation_id = operation_id.clone(); - Box::new(move || { - let sub_id = sub_id.clone(); - let operation_id = operation_id.clone(); - let methods = methods.clone(); - - Box::pin(async move { - methods - .chainhead_v1_continue(&sub_id, &operation_id) - .await?; - Ok(()) - }) - }) - }; - - Ok(StorageItems::new( - operation_id, - continue_call, - Box::pin(follow_events), - )) - } - - fn new( - operation_id: Arc, - continue_call: ContinueFutGetter, - follow_event_stream: FollowEventStream>, - ) -> Self { - Self { - done: false, - buffered_responses: VecDeque::new(), - operation_id, - continue_call, - continue_fut: None, - follow_event_stream, - } - } -} - -pub type FollowEventStream = - Pin>> + Send + 'static>>; -pub type ContinueFutGetter = Box ContinueFut + Send + 'static>; -pub type ContinueFut = Pin> + Send + 'static>>; - -impl Stream for StorageItems { - type Item = Result; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - loop { - if self.done { - return Poll::Ready(None); - } - - if let Some(item) = self.buffered_responses.pop_front() { - return Poll::Ready(Some(Ok(item))); - } - - if let Some(mut fut) = self.continue_fut.take() { - match fut.poll_unpin(cx) { - Poll::Pending => { - 
self.continue_fut = Some(fut); - return Poll::Pending; - } - Poll::Ready(Err(e)) => { - if e.is_disconnected_will_reconnect() { - self.continue_fut = Some((self.continue_call)()); - continue; - } - - self.done = true; - return Poll::Ready(Some(Err(e))); - } - Poll::Ready(Ok(())) => { - // Finished; carry on. - } - } - } - - let ev = match self.follow_event_stream.poll_next_unpin(cx) { - Poll::Pending => return Poll::Pending, - Poll::Ready(None) => return Poll::Ready(None), - Poll::Ready(Some(ev)) => ev, - }; - - match ev { - FollowEvent::OperationWaitingForContinue(id) - if id.operation_id == *self.operation_id => - { - // Start a call to ask for more events - self.continue_fut = Some((self.continue_call)()); - continue; - } - FollowEvent::OperationStorageDone(id) if id.operation_id == *self.operation_id => { - // We're finished! - self.done = true; - return Poll::Ready(None); - } - FollowEvent::OperationStorageItems(items) - if items.operation_id == *self.operation_id => - { - // We have items; buffer them to emit next loops. - self.buffered_responses = items.items; - continue; - } - FollowEvent::OperationError(err) if err.operation_id == *self.operation_id => { - // Something went wrong obtaining storage items; mark as done and return the error. - self.done = true; - return Poll::Ready(Some(Err(BackendError::other(err.error)))); - } - _ => { - // We don't care about this event; wait for the next. - continue; - } - } - } - } -} diff --git a/new/src/backend/legacy.rs b/new/src/backend/legacy.rs deleted file mode 100644 index d7aadf4989..0000000000 --- a/new/src/backend/legacy.rs +++ /dev/null @@ -1,436 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! This module exposes a legacy backend implementation, which relies -//! on the legacy RPC API methods. 
- -mod descendant_streams; - -use crate::backend::utils::{retry, retry_stream}; -use crate::backend::{ - Backend, BlockRef, StorageResponse, StreamOf, StreamOfResults, TransactionStatus, -}; -use crate::config::{Config, HashFor, Hasher, Header, RpcConfigFor}; -use crate::error::BackendError; -use async_trait::async_trait; -use codec::Encode; -use descendant_streams::{StorageFetchDescendantKeysStream, StorageFetchDescendantValuesStream}; -use futures::TryStreamExt; -use futures::{Future, Stream, StreamExt, future, future::Either, stream}; -use subxt_rpcs::RpcClient; -use subxt_rpcs::methods::legacy::NumberOrHex; -use subxt_rpcs::methods::legacy::{LegacyRpcMethods, TransactionStatus as RpcTransactionStatus}; - -/// Configure and build an [`LegacyBackend`]. -pub struct LegacyBackendBuilder { - storage_page_size: u32, - _marker: std::marker::PhantomData, -} - -impl Default for LegacyBackendBuilder { - fn default() -> Self { - Self::new() - } -} - -impl LegacyBackendBuilder { - /// Create a new [`LegacyBackendBuilder`]. - pub fn new() -> Self { - Self { - storage_page_size: 64, - _marker: std::marker::PhantomData, - } - } - - /// Iterating over storage entries using the [`LegacyBackend`] requires - /// fetching entries in batches. This configures the number of entries that - /// we'll try to obtain in each batch (default: 64). - pub fn storage_page_size(mut self, storage_page_size: u32) -> Self { - self.storage_page_size = storage_page_size; - self - } - - /// Given an [`RpcClient`] to use to make requests, this returns a [`LegacyBackend`], - /// which implements the [`Backend`] trait. - pub fn build(self, client: impl Into) -> LegacyBackend { - LegacyBackend { - storage_page_size: self.storage_page_size, - methods: LegacyRpcMethods::new(client.into()), - } - } -} - -/// The legacy backend. 
-#[derive(Debug)] -pub struct LegacyBackend { - storage_page_size: u32, - methods: LegacyRpcMethods>, -} - -impl Clone for LegacyBackend { - fn clone(&self) -> LegacyBackend { - LegacyBackend { - storage_page_size: self.storage_page_size, - methods: self.methods.clone(), - } - } -} - -impl LegacyBackend { - /// Configure and construct an [`LegacyBackend`]. - pub fn builder() -> LegacyBackendBuilder { - LegacyBackendBuilder::new() - } -} - -impl super::sealed::Sealed for LegacyBackend {} - -#[async_trait] -impl Backend for LegacyBackend { - async fn storage_fetch_values( - &self, - keys: Vec>, - at: HashFor, - ) -> Result, BackendError> { - fn get_entry( - key: Vec, - at: HashFor, - methods: LegacyRpcMethods>, - ) -> impl Future, BackendError>> { - retry(move || { - let methods = methods.clone(); - let key = key.clone(); - async move { - let res = methods.state_get_storage(&key, Some(at)).await?; - Ok(res.map(move |value| StorageResponse { key, value })) - } - }) - } - - let keys = keys.clone(); - let methods = self.methods.clone(); - - // For each key, return it + a future to get the result. - let iter = keys - .into_iter() - .map(move |key| get_entry(key, at, methods.clone())); - - let s = stream::iter(iter) - // Resolve the future - .then(|fut| fut) - // Filter any Options out (ie if we didn't find a value at some key we return nothing for it). 
- .filter_map(|r| future::ready(r.transpose())); - - Ok(StreamOf(Box::pin(s))) - } - - async fn storage_fetch_descendant_keys( - &self, - key: Vec, - at: HashFor, - ) -> Result>, BackendError> { - let keys = StorageFetchDescendantKeysStream::new( - self.methods.clone(), - key, - at, - self.storage_page_size, - ); - - let keys = keys.flat_map(|keys| { - match keys { - Err(e) => { - // If there's an error, return that next: - Either::Left(stream::iter(std::iter::once(Err(e)))) - } - Ok(keys) => { - // Or, stream each "ok" value: - Either::Right(stream::iter(keys.into_iter().map(Ok))) - } - } - }); - - Ok(StreamOf(Box::pin(keys))) - } - - async fn storage_fetch_descendant_values( - &self, - key: Vec, - at: HashFor, - ) -> Result, BackendError> { - let values_stream = StorageFetchDescendantValuesStream::new( - self.methods.clone(), - key, - at, - self.storage_page_size, - ); - - Ok(StreamOf(Box::pin(values_stream))) - } - - async fn genesis_hash(&self) -> Result, BackendError> { - retry(|| async { - let hash = self.methods.genesis_hash().await?; - Ok(hash) - }) - .await - } - - async fn block_number_to_hash( - &self, - number: u64, - ) -> Result>>, BackendError> { - retry(|| async { - let number_or_hash = NumberOrHex::Number(number); - let hash = self - .methods - .chain_get_block_hash(Some(number_or_hash)) - .await? - .map(BlockRef::from_hash); - Ok(hash) - }) - .await - } - - async fn block_header(&self, at: HashFor) -> Result, BackendError> { - retry(|| async { - let header = self.methods.chain_get_header(Some(at)).await?; - Ok(header) - }) - .await - } - - async fn block_body(&self, at: HashFor) -> Result>>, BackendError> { - retry(|| async { - let Some(details) = self.methods.chain_get_block(Some(at)).await? 
else { - return Ok(None); - }; - Ok(Some( - details.block.extrinsics.into_iter().map(|b| b.0).collect(), - )) - }) - .await - } - - async fn latest_finalized_block_ref(&self) -> Result>, BackendError> { - retry(|| async { - let hash = self.methods.chain_get_finalized_head().await?; - Ok(BlockRef::from_hash(hash)) - }) - .await - } - - async fn stream_all_block_headers( - &self, - hasher: T::Hasher, - ) -> Result>)>, BackendError> { - let methods = self.methods.clone(); - let retry_sub = retry_stream(move || { - let methods = methods.clone(); - let hasher = hasher.clone(); - Box::pin(async move { - let sub = methods.chain_subscribe_all_heads().await?; - let sub = sub.map_err(|e| e.into()).map(move |r| { - r.map(|h| { - let hash = hasher.hash(&h.encode()); - (h, BlockRef::from_hash(hash)) - }) - }); - Ok(StreamOf(Box::pin(sub))) - }) - }) - .await?; - - Ok(retry_sub) - } - - async fn stream_best_block_headers( - &self, - hasher: T::Hasher, - ) -> Result>)>, BackendError> { - let methods = self.methods.clone(); - - let retry_sub = retry_stream(move || { - let methods = methods.clone(); - let hasher = hasher.clone(); - Box::pin(async move { - let sub = methods.chain_subscribe_new_heads().await?; - let sub = sub.map_err(|e| e.into()).map(move |r| { - r.map(|h| { - let hash = hasher.hash(&h.encode()); - (h, BlockRef::from_hash(hash)) - }) - }); - Ok(StreamOf(Box::pin(sub))) - }) - }) - .await?; - - Ok(retry_sub) - } - - async fn stream_finalized_block_headers( - &self, - hasher: T::Hasher, - ) -> Result>)>, BackendError> { - let this = self.clone(); - - let retry_sub = retry_stream(move || { - let this = this.clone(); - let hasher = hasher.clone(); - Box::pin(async move { - let sub = this.methods.chain_subscribe_finalized_heads().await?; - - // Get the last finalized block immediately so that the stream will emit every finalized block after this. 
- let last_finalized_block_ref = this.latest_finalized_block_ref().await?; - let last_finalized_block_num = this - .block_header(last_finalized_block_ref.hash()) - .await? - .map(|h| h.number().into()); - - // Fill in any missing blocks, because the backend may not emit every finalized block; just the latest ones which - // are finalized each time. - let sub = subscribe_to_block_headers_filling_in_gaps( - this.methods.clone(), - sub, - last_finalized_block_num, - ); - let sub = sub.map(move |r| { - r.map(|h| { - let hash = hasher.hash(&h.encode()); - (h, BlockRef::from_hash(hash)) - }) - }); - - Ok(StreamOf(Box::pin(sub))) - }) - }) - .await?; - - Ok(retry_sub) - } - - async fn submit_transaction( - &self, - extrinsic: &[u8], - ) -> Result>>, BackendError> { - let sub = self - .methods - .author_submit_and_watch_extrinsic(extrinsic) - .await?; - - let sub = sub.filter_map(|r| { - let mapped = r - .map_err(|e| e.into()) - .map(|tx| { - match tx { - // We ignore these because they don't map nicely to the new API. They don't signal "end states" so this should be fine. 
- RpcTransactionStatus::Future => None, - RpcTransactionStatus::Retracted(_) => None, - // These roughly map across: - RpcTransactionStatus::Ready => Some(TransactionStatus::Validated), - RpcTransactionStatus::Broadcast(_peers) => { - Some(TransactionStatus::Broadcasted) - } - RpcTransactionStatus::InBlock(hash) => { - Some(TransactionStatus::InBestBlock { - hash: BlockRef::from_hash(hash), - }) - } - // These 5 mean that the stream will very likely end: - RpcTransactionStatus::FinalityTimeout(_) => { - Some(TransactionStatus::Dropped { - message: "Finality timeout".into(), - }) - } - RpcTransactionStatus::Finalized(hash) => { - Some(TransactionStatus::InFinalizedBlock { - hash: BlockRef::from_hash(hash), - }) - } - RpcTransactionStatus::Usurped(_) => Some(TransactionStatus::Invalid { - message: "Transaction was usurped by another with the same nonce" - .into(), - }), - RpcTransactionStatus::Dropped => Some(TransactionStatus::Dropped { - message: "Transaction was dropped".into(), - }), - RpcTransactionStatus::Invalid => Some(TransactionStatus::Invalid { - message: - "Transaction is invalid (eg because of a bad nonce, signature etc)" - .into(), - }), - } - }) - .transpose(); - - future::ready(mapped) - }); - - Ok(StreamOf::new(Box::pin(sub))) - } - - async fn call( - &self, - method: &str, - call_parameters: Option<&[u8]>, - at: HashFor, - ) -> Result, BackendError> { - retry(|| async { - let res = self - .methods - .state_call(method, call_parameters, Some(at)) - .await?; - Ok(res) - }) - .await - } -} - -/// Note: This is exposed for testing but is not considered stable and may change -/// without notice in a patch release. -#[doc(hidden)] -pub fn subscribe_to_block_headers_filling_in_gaps( - methods: LegacyRpcMethods>, - sub: S, - mut last_block_num: Option, -) -> impl Stream> + Send -where - T: Config, - S: Stream> + Send, - E: Into + Send + 'static, -{ - sub.flat_map(move |s| { - // Get the header, or return a stream containing just the error. 
- let header = match s { - Ok(header) => header, - Err(e) => return Either::Left(stream::once(async { Err(e.into()) })), - }; - - // We want all previous details up to, but not including this current block num. - let end_block_num = header.number().into(); - - // This is one after the last block we returned details for last time. - let start_block_num = last_block_num.map(|n| n + 1).unwrap_or(end_block_num); - - // Iterate over all of the previous blocks we need headers for, ignoring the current block - // (which we already have the header info for): - let methods = methods.clone(); - let previous_headers = stream::iter(start_block_num..end_block_num) - .then(move |n| { - let methods = methods.clone(); - async move { - let hash = methods.chain_get_block_hash(Some(n.into())).await?; - let header = methods.chain_get_header(hash).await?; - Ok::<_, BackendError>(header) - } - }) - .filter_map(async |h| h.transpose()); - - // On the next iteration, we'll get details starting just after this end block. - last_block_num = Some(end_block_num); - - // Return a combination of any previous headers plus the new header. - Either::Right(previous_headers.chain(stream::once(async { Ok(header) }))) - }) -} diff --git a/new/src/backend/utils.rs b/new/src/backend/utils.rs deleted file mode 100644 index d687c734b8..0000000000 --- a/new/src/backend/utils.rs +++ /dev/null @@ -1,281 +0,0 @@ -//! Backend utils. - -use super::{StreamOf, StreamOfResults}; -use crate::error::BackendError; -use futures::{FutureExt, Stream, StreamExt}; -use std::{future::Future, pin::Pin, task::Poll}; - -/// Spawn a task. -/// -/// - On non-wasm targets, this will spawn a task via [`tokio::spawn`]. -/// - On wasm targets, this will spawn a task via [`wasm_bindgen_futures::spawn_local`]. 
-#[cfg(feature = "runtime")] -pub(crate) fn spawn(future: F) { - #[cfg(not(target_family = "wasm"))] - tokio::spawn(async move { - future.await; - }); - #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] - wasm_bindgen_futures::spawn_local(async move { - future.await; - }); -} - -/// Retry a future until it doesn't return a disconnected error. -/// -/// # Example -/// -/// ```rust,no_run,standalone_crate -/// use subxt::backend::utils::retry; -/// -/// async fn some_future() -> Result<(), subxt::error::BackendError> { -/// Ok(()) -/// } -/// -/// #[tokio::main] -/// async fn main() { -/// let result = retry(|| some_future()).await; -/// } -/// ``` -pub async fn retry(mut retry_future: F) -> Result -where - F: FnMut() -> T, - T: Future>, -{ - const REJECTED_MAX_RETRIES: usize = 10; - let mut rejected_retries = 0; - - loop { - match retry_future().await { - Ok(v) => return Ok(v), - Err(e) => { - if e.is_disconnected_will_reconnect() { - continue; - } - - // TODO: https://github.com/paritytech/subxt/issues/1567 - // This is a hack because, in the event of a disconnection, - // we may not get the correct subscription ID back on reconnecting. - // - // This is because we have a race between this future and the - // separate chainHead subscription, which runs in a different task. - // if this future is too quick, it'll be given back an old - // subscription ID from the chainHead subscription which has yet - // to reconnect and establish a new subscription ID. - // - // In the event of a wrong subscription Id being used, we happen to - // hand back an `RpcError::LimitReached`, and so can retry when we - // specifically hit that error to see if we get a new subscription ID - // eventually. - if e.is_rpc_limit_reached() && rejected_retries < REJECTED_MAX_RETRIES { - rejected_retries += 1; - continue; - } - - return Err(e); - } - } - } -} - -/// Create a retry stream that will resubscribe on disconnect. 
-/// -/// It's important to note that this function is intended to work only for stateless subscriptions. -/// If the subscription takes input or modifies state, this function should not be used. -/// -/// # Example -/// -/// ```rust,no_run,standalone_crate -/// use subxt::backend::{utils::retry_stream, StreamOf}; -/// use futures::future::FutureExt; -/// -/// #[tokio::main] -/// async fn main() { -/// retry_stream(|| { -/// // This needs to return a stream of results but if you are using -/// // the subxt backend already it will return StreamOf so you can just -/// // return it directly in the async block below. -/// async move { Ok(StreamOf::new(Box::pin(futures::stream::iter([Ok(2)])))) }.boxed() -/// }).await; -/// } -/// ``` -pub async fn retry_stream(get_stream: F) -> Result, BackendError> -where - F: Clone + Send + 'static + FnMut() -> Fut, - Fut: Future, BackendError>> + Send, - R: Send + 'static, -{ - // This returns the stream. On disconnect this is called again. - let get_stream_with_retry = move || { - let get_stream = get_stream.clone(); - async move { retry(get_stream).await }.boxed() - }; - - // The extra Box is to encapsulate the retry subscription type - Ok(StreamOf::new(Box::pin(RetrySubscription { - state: RetrySubscriptionState::Init, - resubscribe: get_stream_with_retry, - }))) -} - -/// Retry subscription. 
-struct RetrySubscription { - resubscribe: F, - state: RetrySubscriptionState, -} - -enum RetrySubscriptionState { - Init, - Pending(R), - Stream(StreamOfResults), - Done, -} - -impl std::marker::Unpin for RetrySubscription {} - -impl Stream for RetrySubscription -where - F: FnMut() -> R, - R: Future, BackendError>> + Unpin, -{ - type Item = Result; - - fn poll_next( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> Poll> { - loop { - match &mut self.state { - RetrySubscriptionState::Init => { - self.state = RetrySubscriptionState::Pending((self.resubscribe)()); - } - RetrySubscriptionState::Stream(s) => match s.poll_next_unpin(cx) { - Poll::Ready(Some(Err(err))) => { - if err.is_disconnected_will_reconnect() { - self.state = RetrySubscriptionState::Init; - } - return Poll::Ready(Some(Err(err))); - } - Poll::Ready(None) => { - self.state = RetrySubscriptionState::Done; - return Poll::Ready(None); - } - Poll::Ready(Some(Ok(val))) => { - return Poll::Ready(Some(Ok(val))); - } - Poll::Pending => { - return Poll::Pending; - } - }, - RetrySubscriptionState::Pending(fut) => match fut.poll_unpin(cx) { - Poll::Ready(Err(err)) => { - if err.is_disconnected_will_reconnect() { - self.state = RetrySubscriptionState::Init; - } - return Poll::Ready(Some(Err(err))); - } - Poll::Ready(Ok(stream)) => { - self.state = RetrySubscriptionState::Stream(stream); - continue; - } - Poll::Pending => { - return Poll::Pending; - } - }, - RetrySubscriptionState::Done => return Poll::Ready(None), - }; - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::backend::StreamOf; - - fn disconnect_err() -> BackendError { - BackendError::Rpc(subxt_rpcs::Error::DisconnectedWillReconnect(String::new()).into()) - } - - fn custom_err() -> BackendError { - BackendError::other("") - } - - #[tokio::test] - async fn retry_stream_works() { - let retry_stream = retry_stream(|| { - async { - Ok(StreamOf::new(Box::pin(futures::stream::iter([ - Ok(1), - Ok(2), - Ok(3), - 
Err(disconnect_err()), - ])))) - } - .boxed() - }) - .await - .unwrap(); - - let result = retry_stream - .take(5) - .collect::>>() - .await; - - assert!(matches!(result[0], Ok(r) if r == 1)); - assert!(matches!(result[1], Ok(r) if r == 2)); - assert!(matches!(result[2], Ok(r) if r == 3)); - assert!(matches!(result[3], Err(ref e) if e.is_disconnected_will_reconnect())); - assert!(matches!(result[4], Ok(r) if r == 1)); - } - - #[tokio::test] - async fn retry_sub_works() { - let stream = futures::stream::iter([Ok(1), Err(disconnect_err())]); - - let resubscribe = Box::new(move || { - async move { Ok(StreamOf::new(Box::pin(futures::stream::iter([Ok(2)])))) }.boxed() - }); - - let retry_stream = RetrySubscription { - state: RetrySubscriptionState::Stream(StreamOf::new(Box::pin(stream))), - resubscribe, - }; - - let result: Vec<_> = retry_stream.collect().await; - - assert!(matches!(result[0], Ok(r) if r == 1)); - assert!(matches!(result[1], Err(ref e) if e.is_disconnected_will_reconnect())); - assert!(matches!(result[2], Ok(r) if r == 2)); - } - - #[tokio::test] - async fn retry_sub_err_terminates_stream() { - let stream = futures::stream::iter([Ok(1)]); - let resubscribe = Box::new(|| async move { Err(custom_err()) }.boxed()); - - let retry_stream = RetrySubscription { - state: RetrySubscriptionState::Stream(StreamOf::new(Box::pin(stream))), - resubscribe, - }; - - assert_eq!(retry_stream.count().await, 1); - } - - #[tokio::test] - async fn retry_sub_resubscribe_err() { - let stream = futures::stream::iter([Ok(1), Err(disconnect_err())]); - let resubscribe = Box::new(|| async move { Err(custom_err()) }.boxed()); - - let retry_stream = RetrySubscription { - state: RetrySubscriptionState::Stream(StreamOf::new(Box::pin(stream))), - resubscribe, - }; - - let result: Vec<_> = retry_stream.collect().await; - - assert!(matches!(result[0], Ok(r) if r == 1)); - assert!(matches!(result[1], Err(ref e) if e.is_disconnected_will_reconnect())); - assert!(matches!(result[2], Err(ref 
e) if matches!(e, BackendError::Other(_)))); - } -} diff --git a/new/src/client/offline_client.rs b/new/src/client/offline_client.rs deleted file mode 100644 index 04d4d759d5..0000000000 --- a/new/src/client/offline_client.rs +++ /dev/null @@ -1,107 +0,0 @@ -use crate::client::ClientAtBlock; -use crate::config::{Config, HashFor, Hasher}; -use crate::error::OfflineClientAtBlockError; -use std::sync::Arc; -use subxt_metadata::Metadata; - -#[derive(Clone, Debug)] -pub struct OfflineClient { - /// The configuration for this client. - config: T, -} - -impl OfflineClient { - /// Create a new [`OfflineClient`] with the given configuration. - pub fn new(config: T) -> Self { - OfflineClient { config } - } - - /// Pick the block height at which to operate. This references data from the - /// [`OfflineClient`] it's called on, and so cannot outlive it. - pub fn at_block( - &self, - block_number: impl Into, - ) -> Result>, OfflineClientAtBlockError> { - let block_number = block_number.into(); - let (spec_version, transaction_version) = self - .config - .spec_and_transaction_version_for_block_number(block_number) - .ok_or(OfflineClientAtBlockError::SpecVersionNotFound { block_number })?; - - let metadata = self - .config - .metadata_for_spec_version(spec_version) - .ok_or(OfflineClientAtBlockError::MetadataNotFound { spec_version })?; - - let genesis_hash = self.config.genesis_hash(); - - let hasher = ::new(&metadata); - - let offline_client_at_block = OfflineClientAtBlock { - metadata, - block_number, - genesis_hash, - spec_version, - hasher, - transaction_version, - }; - - Ok(ClientAtBlock::new(offline_client_at_block)) - } -} - -#[derive(Clone)] -pub struct OfflineClientAtBlock { - metadata: Arc, - block_number: u64, - genesis_hash: Option>, - spec_version: u32, - hasher: T::Hasher, - transaction_version: u32, -} - -/// This represents an offline-only client at a specific block. 
-#[doc(hidden)] -pub trait OfflineClientAtBlockT: Clone { - /// Get a reference to the metadata appropriate for this block. - fn metadata_ref(&self) -> &Metadata; - /// Get a clone of the metadata appropriate for this block. - fn metadata(&self) -> Arc; - /// The block number we're operating at. - fn block_number(&self) -> u64; - /// Return the genesis hash for the chain if it is known. - fn genesis_hash(&self) -> Option>; - /// The spec version at the current block. - fn spec_version(&self) -> u32; - /// Return a hasher that works at the current block. - fn hasher(&self) -> &T::Hasher; - /// The transaction version at the current block. - /// - /// Note: This is _not_ the same as the transaction version that - /// is encoded at the beginning of transactions (ie 4 or 5). - fn transaction_version(&self) -> u32; -} - -impl OfflineClientAtBlockT for OfflineClientAtBlock { - fn metadata_ref(&self) -> &Metadata { - &self.metadata - } - fn metadata(&self) -> Arc { - self.metadata.clone() - } - fn block_number(&self) -> u64 { - self.block_number - } - fn genesis_hash(&self) -> Option> { - self.genesis_hash - } - fn spec_version(&self) -> u32 { - self.spec_version - } - fn transaction_version(&self) -> u32 { - self.transaction_version - } - fn hasher(&self) -> &T::Hasher { - &self.hasher - } -} diff --git a/new/src/client/online_client.rs b/new/src/client/online_client.rs deleted file mode 100644 index 287f0257f7..0000000000 --- a/new/src/client/online_client.rs +++ /dev/null @@ -1,597 +0,0 @@ -mod block_number_or_ref; -mod blocks; - -use super::ClientAtBlock; -use super::OfflineClientAtBlockT; -use crate::backend::{Backend, BlockRef, CombinedBackend}; -use crate::config::{Config, HashFor, Hasher, Header}; -use crate::error::{BlocksError, OnlineClientAtBlockError}; -use blocks::Blocks; -use codec::{Compact, Decode, Encode}; -use core::marker::PhantomData; -use frame_decode::helpers::ToTypeRegistry; -use frame_metadata::{RuntimeMetadata, RuntimeMetadataPrefixed}; -use 
scale_info_legacy::TypeRegistrySet; -use std::future::Future; -use std::sync::Arc; -use subxt_metadata::Metadata; -use subxt_rpcs::RpcClient; - -#[cfg(feature = "jsonrpsee")] -#[cfg_attr(docsrs, doc(cfg(feature = "jsonrpsee")))] -use crate::error::OnlineClientError; - -pub use block_number_or_ref::BlockNumberOrRef; - -/// A client which exposes the means to decode historic data on a chain online. -#[derive(Clone, Debug)] -pub struct OnlineClient { - inner: Arc>, -} - -struct OnlineClientInner { - /// The configuration for this client. - config: T, - /// Chain genesis hash. Needed to construct transactions, - /// so we obtain it up front on constructing this. - genesis_hash: HashFor, - /// The RPC methods used to communicate with the node. - backend: Arc>, -} - -impl std::fmt::Debug for OnlineClientInner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("OnlineClientInner") - .field("config", &"") - .field("backend", &"Arc") - .finish() - } -} - -impl OnlineClient { - /// Construct a new [`OnlineClient`] using default settings which - /// point to a locally running node on `ws://127.0.0.1:9944`. - /// - /// **Note:** This will only work if the local node is an archive node. - #[cfg(all(feature = "jsonrpsee", feature = "runtime"))] - pub async fn new(config: T) -> Result, OnlineClientError> { - let url = "ws://127.0.0.1:9944"; - OnlineClient::from_url(config, url).await - } - - /// Construct a new [`OnlineClient`], providing a URL to connect to. 
- #[cfg(all(feature = "jsonrpsee", feature = "runtime"))] - pub async fn from_url( - config: T, - url: impl AsRef, - ) -> Result, OnlineClientError> { - let url_str = url.as_ref(); - let url = url::Url::parse(url_str).map_err(|_| OnlineClientError::InvalidUrl { - url: url_str.to_string(), - })?; - if !Self::is_url_secure(&url) { - return Err(OnlineClientError::RpcError(subxt_rpcs::Error::InsecureUrl( - url_str.to_string(), - ))); - } - OnlineClient::from_insecure_url(config, url).await - } - - /// Construct a new [`OnlineClient`], providing a URL to connect to. - /// - /// Allows insecure URLs without SSL encryption, e.g. (http:// and ws:// URLs). - #[cfg(all(feature = "jsonrpsee", feature = "runtime"))] - pub async fn from_insecure_url( - config: T, - url: impl AsRef, - ) -> Result, OnlineClientError> { - let rpc_client = RpcClient::from_insecure_url(url).await?; - OnlineClient::from_rpc_client(config, rpc_client).await - } - - fn is_url_secure(url: &url::Url) -> bool { - let secure_scheme = url.scheme() == "https" || url.scheme() == "wss"; - let is_localhost = url.host().is_some_and(|e| match e { - url::Host::Domain(e) => e == "localhost", - url::Host::Ipv4(e) => e.is_loopback(), - url::Host::Ipv6(e) => e.is_loopback(), - }); - secure_scheme || is_localhost - } - - /// Construct a new [`OnlineClient`] by providing an [`RpcClient`] to drive the connection. - /// This will use the current default [`Backend`], which may change in future releases. 
- #[cfg(all(feature = "jsonrpsee", feature = "runtime"))] - pub async fn from_rpc_client( - config: T, - rpc_client: impl Into, - ) -> Result, OnlineClientError> { - let rpc_client = rpc_client.into(); - let backend = CombinedBackend::builder() - .build_with_background_driver(rpc_client) - .await - .map_err(OnlineClientError::CannotBuildCombinedBackend)?; - let backend: Arc> = Arc::new(backend); - OnlineClient::from_backend(config, backend).await - } - - /// Construct a new [`OnlineClient`] by providing an underlying [`Backend`] - /// implementation to power it. - pub async fn from_backend( - config: T, - backend: impl Into>>, - ) -> Result, OnlineClientError> { - let backend = backend.into(); - let genesis_hash = match config.genesis_hash() { - Some(hash) => hash, - None => backend - .genesis_hash() - .await - .map_err(OnlineClientError::CannotGetGenesisHash)?, - }; - - Ok(OnlineClient { - inner: Arc::new(OnlineClientInner { - config, - genesis_hash, - backend: backend.into(), - }), - }) - } - - /// Obtain a stream of all blocks imported by the node. - /// - /// **Note:** You probably want to use [`Self::stream_blocks()`] most of - /// the time. Blocks returned here may be pruned at any time and become inaccessible, - /// leading to errors when trying to work with them. - pub async fn stream_all_blocks(&self) -> Result, BlocksError> { - // We need a hasher to know how to hash things. Thus, we need metadata to instantiate - // the hasher, so let's use the current block. - let current_block = self - .at_current_block() - .await - .map_err(BlocksError::CannotGetCurrentBlock)?; - let hasher = current_block.client.hasher.clone(); - - let stream = self - .inner - .backend - .stream_all_block_headers(hasher) - .await - .map_err(BlocksError::CannotGetBlockHeaderStream)?; - - Ok(Blocks::from_headers_stream(self.clone(), stream)) - } - - /// Obtain a stream of blocks imported by the node onto the current best fork. 
- /// - /// **Note:** You probably want to use [`Self::stream_blocks()`] most of - /// the time. Blocks returned here may be pruned at any time and become inaccessible, - /// leading to errors when trying to work with them. - pub async fn stream_best_blocks(&self) -> Result, BlocksError> { - // We need a hasher to know how to hash things. Thus, we need metadata to instantiate - // the hasher, so let's use the current block. - let current_block = self - .at_current_block() - .await - .map_err(BlocksError::CannotGetCurrentBlock)?; - let hasher = current_block.client.hasher.clone(); - - let stream = self - .inner - .backend - .stream_best_block_headers(hasher) - .await - .map_err(BlocksError::CannotGetBlockHeaderStream)?; - - Ok(Blocks::from_headers_stream(self.clone(), stream)) - } - - /// Obtain a stream of finalized blocks. - pub async fn stream_blocks(&self) -> Result, BlocksError> { - // We need a hasher to know how to hash things. Thus, we need metadata to instantiate - // the hasher, so let's use the current block. - let current_block = self - .at_current_block() - .await - .map_err(BlocksError::CannotGetCurrentBlock)?; - let hasher = current_block.client.hasher.clone(); - - let stream = self - .inner - .backend - .stream_finalized_block_headers(hasher) - .await - .map_err(BlocksError::CannotGetBlockHeaderStream)?; - - Ok(Blocks::from_headers_stream(self.clone(), stream)) - } - - /// Instantiate a client to work at the current finalized block _at the time of instantiation_. - /// This does not track new blocks. - pub async fn at_current_block( - &self, - ) -> Result>, OnlineClientAtBlockError> { - let latest_block = self - .inner - .backend - .latest_finalized_block_ref() - .await - .map_err(|e| OnlineClientAtBlockError::CannotGetCurrentBlock { reason: e })?; - - self.at_block(latest_block).await - } - - /// Instantiate a client for working at a specific block. 
- pub async fn at_block( - &self, - number_or_hash: impl Into>, - ) -> Result>, OnlineClientAtBlockError> { - let number_or_hash = number_or_hash.into(); - - // We are given either a block hash or number. We need both. - let (block_ref, block_number) = match number_or_hash { - BlockNumberOrRef::BlockRef(block_ref) => { - let block_hash = block_ref.hash(); - let block_header = self - .inner - .backend - .block_header(block_hash) - .await - .map_err(|e| OnlineClientAtBlockError::CannotGetBlockHeader { - block_hash: block_hash.into(), - reason: e, - })? - .ok_or(OnlineClientAtBlockError::BlockHeaderNotFound { - block_hash: block_hash.into(), - })?; - (block_ref, block_header.number()) - } - BlockNumberOrRef::Number(block_number) => { - let block_ref = self - .inner - .backend - .block_number_to_hash(block_number) - .await - .map_err(|e| OnlineClientAtBlockError::CannotGetBlockHash { - block_number, - reason: e, - })? - .ok_or(OnlineClientAtBlockError::BlockNotFound { block_number })?; - (block_ref, block_number) - } - }; - - self.at_block_hash_and_number(block_ref, block_number).await - } - - /// Instantiate a client for working at a specific block. This takes a block hash/ref _and_ the - /// corresponding block number. When both are available, this saves an RPC call to obtain one from - /// the other. - /// - /// **Warning:** If the block hash and number do not align, then things will go wrong. Prefer to - /// use [`Self::at_block`] if in any doubt. - pub async fn at_block_hash_and_number( - &self, - block_ref: impl Into>>, - block_number: u64, - ) -> Result>, OnlineClientAtBlockError> { - let block_ref = block_ref.into(); - let block_hash = block_ref.hash(); - - // Obtain the spec version so that we know which metadata to use at this block. - // Obtain the transaction version because it's required for constructing extrinsics. 
- let (spec_version, transaction_version) = match self - .inner - .config - .spec_and_transaction_version_for_block_number(block_number) - { - Some(version) => version, - None => { - let spec_version_bytes = self - .inner - .backend - .call("Core_version", None, block_hash) - .await - .map_err(|e| OnlineClientAtBlockError::CannotGetSpecVersion { - block_hash: block_hash.into(), - reason: e, - })?; - - #[derive(codec::Decode)] - struct SpecVersionHeader { - _spec_name: String, - _impl_name: String, - _authoring_version: u32, - spec_version: u32, - _impl_version: u32, - _apis: Vec<([u8; 8], u32)>, - transaction_version: u32, - } - let version = - SpecVersionHeader::decode(&mut &spec_version_bytes[..]).map_err(|e| { - OnlineClientAtBlockError::CannotDecodeSpecVersion { - block_hash: block_hash.into(), - reason: e, - } - })?; - (version.spec_version, version.transaction_version) - } - }; - - // Obtain the metadata for the block. Allow our config to cache it. - let metadata = match self.inner.config.metadata_for_spec_version(spec_version) { - Some(metadata) => metadata, - None => { - let metadata: Metadata = - match get_metadata(&*self.inner.backend, block_hash).await? { - m @ RuntimeMetadata::V0(_) - | m @ RuntimeMetadata::V1(_) - | m @ RuntimeMetadata::V2(_) - | m @ RuntimeMetadata::V3(_) - | m @ RuntimeMetadata::V4(_) - | m @ RuntimeMetadata::V5(_) - | m @ RuntimeMetadata::V6(_) - | m @ RuntimeMetadata::V7(_) => { - return Err(OnlineClientAtBlockError::UnsupportedMetadataVersion { - block_hash: block_hash.into(), - version: m.version(), - }); - } - RuntimeMetadata::V8(m) => { - let types = get_legacy_types(self, &m, spec_version)?; - Metadata::from_v8(&m, &types).map_err(|e| { - OnlineClientAtBlockError::CannotConvertLegacyMetadata { - block_hash: block_hash.into(), - metadata_version: 8, - reason: e, - } - })? 
- } - RuntimeMetadata::V9(m) => { - let types = get_legacy_types(self, &m, spec_version)?; - Metadata::from_v9(&m, &types).map_err(|e| { - OnlineClientAtBlockError::CannotConvertLegacyMetadata { - block_hash: block_hash.into(), - metadata_version: 9, - reason: e, - } - })? - } - RuntimeMetadata::V10(m) => { - let types = get_legacy_types(self, &m, spec_version)?; - Metadata::from_v10(&m, &types).map_err(|e| { - OnlineClientAtBlockError::CannotConvertLegacyMetadata { - block_hash: block_hash.into(), - metadata_version: 10, - reason: e, - } - })? - } - RuntimeMetadata::V11(m) => { - let types = get_legacy_types(self, &m, spec_version)?; - Metadata::from_v11(&m, &types).map_err(|e| { - OnlineClientAtBlockError::CannotConvertLegacyMetadata { - block_hash: block_hash.into(), - metadata_version: 11, - reason: e, - } - })? - } - RuntimeMetadata::V12(m) => { - let types = get_legacy_types(self, &m, spec_version)?; - Metadata::from_v12(&m, &types).map_err(|e| { - OnlineClientAtBlockError::CannotConvertLegacyMetadata { - block_hash: block_hash.into(), - metadata_version: 12, - reason: e, - } - })? - } - RuntimeMetadata::V13(m) => { - let types = get_legacy_types(self, &m, spec_version)?; - Metadata::from_v13(&m, &types).map_err(|e| { - OnlineClientAtBlockError::CannotConvertLegacyMetadata { - block_hash: block_hash.into(), - metadata_version: 13, - reason: e, - } - })? 
- } - RuntimeMetadata::V14(m) => Metadata::from_v14(m).map_err(|e| { - OnlineClientAtBlockError::CannotConvertModernMetadata { - block_hash: block_hash.into(), - metadata_version: 14, - reason: e, - } - })?, - RuntimeMetadata::V15(m) => Metadata::from_v15(m).map_err(|e| { - OnlineClientAtBlockError::CannotConvertModernMetadata { - block_hash: block_hash.into(), - metadata_version: 15, - reason: e, - } - })?, - RuntimeMetadata::V16(m) => Metadata::from_v16(m).map_err(|e| { - OnlineClientAtBlockError::CannotConvertModernMetadata { - block_hash: block_hash.into(), - metadata_version: 16, - reason: e, - } - })?, - }; - let metadata = Arc::new(metadata); - self.inner - .config - .set_metadata_for_spec_version(spec_version, metadata.clone()); - metadata - } - }; - - let online_client_at_block = OnlineClientAtBlock { - client: self.clone(), - hasher: ::new(&metadata), - metadata, - block_ref, - block_number, - spec_version, - transaction_version, - }; - - Ok(ClientAtBlock { - client: online_client_at_block, - marker: PhantomData, - }) - } -} - -/// This represents an online client at a specific block. -#[doc(hidden)] -pub trait OnlineClientAtBlockT: OfflineClientAtBlockT { - type AtBlockError: std::error::Error; - /// Return the RPC methods we'll use to interact with the node. - fn backend(&self) -> &dyn Backend; - /// Return the block hash for the current block. - fn block_hash(&self) -> HashFor; - /// Point at a new block. - fn at_block( - &self, - number_or_hash: BlockNumberOrRef, - ) -> impl Future, Self::AtBlockError>>; -} - -/// The inner type providing the necessary data to work online at a specific block. 
-#[derive(Clone)] -pub struct OnlineClientAtBlock { - client: OnlineClient, - metadata: Arc, - hasher: T::Hasher, - block_ref: BlockRef>, - block_number: u64, - spec_version: u32, - transaction_version: u32, -} - -impl OnlineClientAtBlockT for OnlineClientAtBlock { - type AtBlockError = OnlineClientAtBlockError; - - fn backend(&self) -> &dyn Backend { - &*self.client.inner.backend - } - fn block_hash(&self) -> HashFor { - self.block_ref.hash() - } - async fn at_block( - &self, - number_or_hash: BlockNumberOrRef, - ) -> Result, Self::AtBlockError> { - self.client.at_block(number_or_hash).await - } -} - -impl OfflineClientAtBlockT for OnlineClientAtBlock { - fn metadata_ref(&self) -> &Metadata { - &self.metadata - } - fn metadata(&self) -> Arc { - self.metadata.clone() - } - fn block_number(&self) -> u64 { - self.block_number - } - fn genesis_hash(&self) -> Option> { - Some(self.client.inner.genesis_hash) - } - fn spec_version(&self) -> u32 { - self.spec_version - } - fn transaction_version(&self) -> u32 { - self.transaction_version - } - fn hasher(&self) -> &T::Hasher { - &self.hasher - } -} - -fn get_legacy_types<'a, T: Config, Md: ToTypeRegistry>( - client: &'a OnlineClient, - metadata: &Md, - spec_version: u32, -) -> Result, OnlineClientAtBlockError> { - let mut types = client - .inner - .config - .legacy_types_for_spec_version(spec_version) - .ok_or(OnlineClientAtBlockError::MissingLegacyTypes)?; - - // Extend the types with information from the metadata (ie event/error/call enums): - let additional_types = frame_decode::helpers::type_registry_from_metadata(metadata) - .map_err(|e| OnlineClientAtBlockError::CannotInjectMetadataTypes { parse_error: e })?; - types.prepend(additional_types); - - Ok(types) -} - -async fn get_metadata( - backend: &dyn Backend, - block_hash: HashFor, -) -> Result { - // First, try to use the "modern" metadata APIs to get the most recent version we can. 
- let version_to_get = backend - .call("Metadata_metadata_versions", None, block_hash) - .await - .ok() - .and_then(|res| >::decode(&mut &res[..]).ok()) - .and_then(|versions| { - // We want to filter out the "unstable" version, which is represented by u32::MAX. - versions.into_iter().filter(|v| *v != u32::MAX).max() - }); - - // We had success calling the above API, so we expect the "modern" metadata API to work. - if let Some(version_to_get) = version_to_get { - let version_bytes = version_to_get.encode(); - let rpc_response = backend - .call( - "Metadata_metadata_at_version", - Some(&version_bytes), - block_hash, - ) - .await - .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata { - block_hash: block_hash.into(), - reason: format!("Error calling Metadata_metadata_at_version: {e}"), - })?; - - // Option because we may have asked for a version that doesn't exist. Compact because we get back a Vec - // of the metadata bytes, and the Vec is preceded by it's compact encoded length. The actual bytes are then - // decoded as a `RuntimeMetadataPrefixed`, after this. - let (_, metadata) = , RuntimeMetadataPrefixed)>>::decode(&mut &rpc_response[..]) - .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata { - block_hash: block_hash.into(), - reason: format!("Error decoding response for Metadata_metadata_at_version: {e}"), - })? - .ok_or_else(|| OnlineClientAtBlockError::CannotGetMetadata { - block_hash: block_hash.into(), - reason: format!("No metadata returned for the latest version from Metadata_metadata_versions ({version_to_get})"), - })?; - - return Ok(metadata.1); - } - - // We didn't get a version from Metadata_metadata_versions, so fall back to the "old" API. 
- let metadata_bytes = backend - .call("Metadata_metadata", None, block_hash) - .await - .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata { - block_hash: block_hash.into(), - reason: format!("Error calling Metadata_metadata: {e}"), - })?; - - let (_, metadata) = <(Compact, RuntimeMetadataPrefixed)>::decode(&mut &metadata_bytes[..]) - .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata { - block_hash: block_hash.into(), - reason: format!("Error decoding response for Metadata_metadata: {e}"), - })?; - - Ok(metadata.1) -} diff --git a/new/src/error/dispatch_error.rs b/new/src/error/dispatch_error.rs deleted file mode 100644 index 94df6b430b..0000000000 --- a/new/src/error/dispatch_error.rs +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! A representation of the dispatch error; an error returned when -//! something fails in trying to submit/execute a transaction. - -use super::{DispatchErrorDecodeError, ModuleErrorDecodeError, ModuleErrorDetailsError}; -use core::fmt::Debug; -use scale_decode::{DecodeAsType, TypeResolver, visitor::DecodeAsTypeResult}; -use std::sync::Arc; -use std::{borrow::Cow, marker::PhantomData}; -use subxt_metadata::Metadata; - -/// An error dispatching a transaction. -#[derive(Debug, thiserror::Error, PartialEq, Eq)] -#[allow(clippy::large_enum_variant)] -#[non_exhaustive] -pub enum DispatchError { - /// Some error occurred. - #[error("Some unknown error occurred.")] - Other, - /// Failed to lookup some data. - #[error("Failed to lookup some data.")] - CannotLookup, - /// A bad origin. - #[error("Bad origin.")] - BadOrigin, - /// A custom error in a module. - #[error("Pallet error: {0}")] - Module(ModuleError), - /// At least one consumer is remaining so the account cannot be destroyed. 
- #[error("At least one consumer is remaining so the account cannot be destroyed.")] - ConsumerRemaining, - /// There are no providers so the account cannot be created. - #[error("There are no providers so the account cannot be created.")] - NoProviders, - /// There are too many consumers so the account cannot be created. - #[error("There are too many consumers so the account cannot be created.")] - TooManyConsumers, - /// An error to do with tokens. - #[error("Token error: {0}")] - Token(TokenError), - /// An arithmetic error. - #[error("Arithmetic error: {0}")] - Arithmetic(ArithmeticError), - /// The number of transactional layers has been reached, or we are not in a transactional layer. - #[error("Transactional error: {0}")] - Transactional(TransactionalError), - /// Resources exhausted, e.g. attempt to read/write data which is too large to manipulate. - #[error( - "Resources exhausted, e.g. attempt to read/write data which is too large to manipulate." - )] - Exhausted, - /// The state is corrupt; this is generally not going to fix itself. - #[error("The state is corrupt; this is generally not going to fix itself.")] - Corruption, - /// Some resource (e.g. a preimage) is unavailable right now. This might fix itself later. - #[error( - "Some resource (e.g. a preimage) is unavailable right now. This might fix itself later." - )] - Unavailable, - /// Root origin is not allowed. - #[error("Root origin is not allowed.")] - RootNotAllowed, -} - -/// An error relating to tokens when dispatching a transaction. -#[derive(scale_decode::DecodeAsType, Debug, thiserror::Error, PartialEq, Eq)] -#[non_exhaustive] -pub enum TokenError { - /// Funds are unavailable. - #[error("Funds are unavailable.")] - FundsUnavailable, - /// Some part of the balance gives the only provider reference to the account and thus cannot be (re)moved. - #[error( - "Some part of the balance gives the only provider reference to the account and thus cannot be (re)moved." 
- )] - OnlyProvider, - /// Account cannot exist with the funds that would be given. - #[error("Account cannot exist with the funds that would be given.")] - BelowMinimum, - /// Account cannot be created. - #[error("Account cannot be created.")] - CannotCreate, - /// The asset in question is unknown. - #[error("The asset in question is unknown.")] - UnknownAsset, - /// Funds exist but are frozen. - #[error("Funds exist but are frozen.")] - Frozen, - /// Operation is not supported by the asset. - #[error("Operation is not supported by the asset.")] - Unsupported, - /// Account cannot be created for a held balance. - #[error("Account cannot be created for a held balance.")] - CannotCreateHold, - /// Withdrawal would cause unwanted loss of account. - #[error("Withdrawal would cause unwanted loss of account.")] - NotExpendable, - /// Account cannot receive the assets. - #[error("Account cannot receive the assets.")] - Blocked, -} - -/// An error relating to arithmetic when dispatching a transaction. -#[derive(scale_decode::DecodeAsType, Debug, thiserror::Error, PartialEq, Eq)] -#[non_exhaustive] -pub enum ArithmeticError { - /// Underflow. - #[error("Underflow.")] - Underflow, - /// Overflow. - #[error("Overflow.")] - Overflow, - /// Division by zero. - #[error("Division by zero.")] - DivisionByZero, -} - -/// An error relating to the transactional layers when dispatching a transaction. -#[derive(scale_decode::DecodeAsType, Debug, thiserror::Error, PartialEq, Eq)] -#[non_exhaustive] -pub enum TransactionalError { - /// Too many transactional layers have been spawned. - #[error("Too many transactional layers have been spawned.")] - LimitReached, - /// A transactional layer was expected, but does not exist. - #[error("A transactional layer was expected, but does not exist.")] - NoLayer, -} - -/// Details about a module error that has occurred. 
-#[derive(Clone, thiserror::Error)] -#[non_exhaustive] -pub struct ModuleError { - metadata: Arc, - /// Bytes representation: - /// - `bytes[0]`: pallet index - /// - `bytes[1]`: error index - /// - `bytes[2..]`: 3 bytes specific for the module error - bytes: [u8; 5], -} - -impl PartialEq for ModuleError { - fn eq(&self, other: &Self) -> bool { - // A module error is the same if the raw underlying details are the same. - self.bytes == other.bytes - } -} - -impl Eq for ModuleError {} - -/// Custom `Debug` implementation, ignores the very large `metadata` field, using it instead (as -/// intended) to resolve the actual pallet and error names. This is much more useful for debugging. -impl Debug for ModuleError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let details = self.details_string(); - write!(f, "ModuleError(<{details}>)") - } -} - -impl std::fmt::Display for ModuleError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let details = self.details_string(); - write!(f, "{details}") - } -} - -impl ModuleError { - /// Return more details about this error. - pub fn details(&self) -> Result, ModuleErrorDetailsError> { - let pallet = self - .metadata - .pallet_by_error_index(self.pallet_index()) - .ok_or(ModuleErrorDetailsError::PalletNotFound { - pallet_index: self.pallet_index(), - })?; - - let variant = pallet - .error_variant_by_index(self.error_index()) - .ok_or_else(|| ModuleErrorDetailsError::ErrorVariantNotFound { - pallet_name: pallet.name().into(), - error_index: self.error_index(), - })?; - - Ok(ModuleErrorDetails { pallet, variant }) - } - - /// Return a formatted string of the resolved error details for debugging/display purposes. 
- pub fn details_string(&self) -> String { - match self.details() { - Ok(details) => format!( - "{pallet_name}::{variant_name}", - pallet_name = details.pallet.name(), - variant_name = details.variant.name, - ), - Err(_) => format!( - "Unknown pallet error '{bytes:?}' (pallet and error details cannot be retrieved)", - bytes = self.bytes - ), - } - } - - /// Return the underlying module error data that was decoded. - pub fn bytes(&self) -> [u8; 5] { - self.bytes - } - - /// Obtain the pallet index from the underlying byte data. - pub fn pallet_index(&self) -> u8 { - self.bytes[0] - } - - /// Obtain the error index from the underlying byte data. - pub fn error_index(&self) -> u8 { - self.bytes[1] - } - - /// Attempts to decode the ModuleError into the top outer Error enum. - pub fn as_root_error(&self) -> Result { - let decoded = E::decode_as_type( - &mut &self.bytes[..], - self.metadata.outer_enums().error_enum_ty(), - self.metadata.types(), - ) - .map_err(ModuleErrorDecodeError)?; - - Ok(decoded) - } -} - -/// Details about the module error. -pub struct ModuleErrorDetails<'a> { - /// The pallet that the error is in - pub pallet: subxt_metadata::PalletMetadata<'a>, - /// The variant representing the error - pub variant: &'a scale_info::Variant, -} - -impl DispatchError { - /// Attempt to decode a runtime [`DispatchError`]. - #[doc(hidden)] - pub fn decode_from<'a>( - bytes: impl Into>, - metadata: Arc, - ) -> Result { - let bytes = bytes.into(); - let dispatch_error_ty_id = metadata - .dispatch_error_ty() - .ok_or(DispatchErrorDecodeError::DispatchErrorTypeIdNotFound)?; - - // The aim is to decode our bytes into roughly this shape. This is copied from - // `sp_runtime::DispatchError`; we need the variant names and any inner variant - // names/shapes to line up in order for decoding to be successful. 
- #[derive(scale_decode::DecodeAsType)] - enum DecodedDispatchError { - Other, - CannotLookup, - BadOrigin, - Module(DecodedModuleErrorBytes), - ConsumerRemaining, - NoProviders, - TooManyConsumers, - Token(TokenError), - Arithmetic(ArithmeticError), - Transactional(TransactionalError), - Exhausted, - Corruption, - Unavailable, - RootNotAllowed, - } - - // ModuleError is a bit special; we want to support being decoded from either - // a legacy format of 2 bytes, or a newer format of 5 bytes. So, just grab the bytes - // out when decoding to manually work with them. - struct DecodedModuleErrorBytes(Vec); - struct DecodedModuleErrorBytesVisitor(PhantomData); - impl scale_decode::Visitor for DecodedModuleErrorBytesVisitor { - type Error = scale_decode::Error; - type Value<'scale, 'info> = DecodedModuleErrorBytes; - type TypeResolver = R; - - fn unchecked_decode_as_type<'scale, 'info>( - self, - input: &mut &'scale [u8], - _type_id: R::TypeId, - _types: &'info R, - ) -> DecodeAsTypeResult, Self::Error>> - { - DecodeAsTypeResult::Decoded(Ok(DecodedModuleErrorBytes(input.to_vec()))) - } - } - - impl scale_decode::IntoVisitor for DecodedModuleErrorBytes { - type AnyVisitor = DecodedModuleErrorBytesVisitor; - fn into_visitor() -> DecodedModuleErrorBytesVisitor { - DecodedModuleErrorBytesVisitor(PhantomData) - } - } - - // Decode into our temporary error: - let decoded_dispatch_err = DecodedDispatchError::decode_as_type( - &mut &*bytes, - dispatch_error_ty_id, - metadata.types(), - ) - .map_err(DispatchErrorDecodeError::CouldNotDecodeDispatchError)?; - - // Convert into the outward-facing error, mainly by handling the Module variant. 
- let dispatch_error = match decoded_dispatch_err { - // Mostly we don't change anything from our decoded to our outward-facing error: - DecodedDispatchError::Other => DispatchError::Other, - DecodedDispatchError::CannotLookup => DispatchError::CannotLookup, - DecodedDispatchError::BadOrigin => DispatchError::BadOrigin, - DecodedDispatchError::ConsumerRemaining => DispatchError::ConsumerRemaining, - DecodedDispatchError::NoProviders => DispatchError::NoProviders, - DecodedDispatchError::TooManyConsumers => DispatchError::TooManyConsumers, - DecodedDispatchError::Token(val) => DispatchError::Token(val), - DecodedDispatchError::Arithmetic(val) => DispatchError::Arithmetic(val), - DecodedDispatchError::Transactional(val) => DispatchError::Transactional(val), - DecodedDispatchError::Exhausted => DispatchError::Exhausted, - DecodedDispatchError::Corruption => DispatchError::Corruption, - DecodedDispatchError::Unavailable => DispatchError::Unavailable, - DecodedDispatchError::RootNotAllowed => DispatchError::RootNotAllowed, - // But we apply custom logic to transform the module error into the outward facing version: - DecodedDispatchError::Module(module_bytes) => { - let module_bytes = module_bytes.0; - - // The old version is 2 bytes; a pallet and error index. - // The new version is 5 bytes; a pallet and error index and then 3 extra bytes. - let bytes = if module_bytes.len() == 2 { - [module_bytes[0], module_bytes[1], 0, 0, 0] - } else if module_bytes.len() == 5 { - [ - module_bytes[0], - module_bytes[1], - module_bytes[2], - module_bytes[3], - module_bytes[4], - ] - } else { - tracing::warn!( - "Can't decode error sp_runtime::DispatchError: bytes do not match known shapes" - ); - // Return _all_ of the bytes; every "unknown" return should be consistent. 
- return Err(DispatchErrorDecodeError::CouldNotDecodeModuleError { - bytes: bytes.to_vec(), - }); - }; - - // And return our outward-facing version: - DispatchError::Module(ModuleError { metadata, bytes }) - } - }; - - Ok(dispatch_error) - } -} diff --git a/new/src/error/hex.rs b/new/src/error/hex.rs deleted file mode 100644 index 01d67a998e..0000000000 --- a/new/src/error/hex.rs +++ /dev/null @@ -1,15 +0,0 @@ -/// Display hex strings. -#[derive(PartialEq, Eq, Clone, Debug, PartialOrd, Ord)] -pub struct Hex(String); - -impl> From for Hex { - fn from(value: T) -> Self { - Hex(hex::encode(value.as_ref())) - } -} - -impl std::fmt::Display for Hex { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - self.0.fmt(f) - } -} diff --git a/new/src/lib.rs b/new/src/lib.rs deleted file mode 100644 index 60ffe5bbc5..0000000000 --- a/new/src/lib.rs +++ /dev/null @@ -1,347 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -// TODO: REMOVE BEFORE MERGING. -#![allow(missing_docs)] - -//! Subxt is a library for interacting with Substrate based nodes. Using it looks something like this: -//! -//! ```rust,ignore -#![doc = include_str!("../examples/tx_basic.rs")] -//! ``` -//! -//! Take a look at [the Subxt guide](book) to learn more about how to use Subxt. - -#[cfg(any( - all(feature = "web", feature = "native"), - not(any(feature = "web", feature = "native")) -))] -compile_error!("subxt: exactly one of the 'web' and 'native' features should be used."); - -// Suppress an unused dependency warning because these are -// only used in example code snippets at the time of writing. 
-#[cfg(test)] -mod only_used_in_docs_or_tests { - use subxt_signer as _; - use tokio as _; - use tracing_subscriber as _; -} - -// // Internal helper macros -// #[macro_use] -// mod macros; - -pub mod backend; -pub mod client; -pub mod config; -pub mod constants; -pub mod custom_values; -pub mod error; -pub mod events; -pub mod extrinsics; -pub mod runtime_apis; -pub mod storage; -pub mod transactions; -pub mod utils; -pub mod view_functions; -// pub mod book; -// pub mod blocks; - -// /// This module provides a [`Config`] type, which is used to define various -// /// types that are important in order to speak to a particular chain. -// /// [`SubstrateConfig`] provides a default set of these types suitable for the -// /// default Substrate node implementation, and [`PolkadotConfig`] for a -// /// Polkadot node. -// pub mod config { -// pub use subxt_core::config::{ -// Config, DefaultExtrinsicParams, DefaultExtrinsicParamsBuilder, ExtrinsicParams, -// ExtrinsicParamsEncoder, Hash, HashFor, Hasher, Header, PolkadotConfig, -// PolkadotExtrinsicParams, SubstrateConfig, SubstrateExtrinsicParams, TransactionExtension, -// polkadot, substrate, transaction_extensions, -// }; -// pub use subxt_core::error::ExtrinsicParamsError; -// } - -// /// Types representing the metadata obtained from a node. -// pub mod metadata { -// pub use subxt_metadata::*; -// } - -// /// Submit dynamic transactions. -// pub mod dynamic { -// pub use subxt_core::dynamic::*; -// } - -// // Expose light client bits -// cfg_unstable_light_client! { -// pub use subxt_lightclient as lightclient; -// } - -// // Expose a few of the most common types at root, -// // but leave most types behind their respective modules. -// pub use crate::{ -// client::{OfflineClient, OnlineClient}, -// config::{Config, PolkadotConfig, SubstrateConfig}, -// error::Error, -// metadata::Metadata, -// }; - -/// Re-export external crates that are made use of in the subxt API. 
-pub mod ext { - pub use codec; - pub use frame_metadata; - pub use futures; - pub use scale_bits; - pub use scale_decode; - pub use scale_encode; - pub use scale_value; - pub use subxt_rpcs; - - #[cfg(feature = "jsonrpsee")] - pub use jsonrpsee; -} - -/// Generate a strongly typed API for interacting with a Substrate runtime from its metadata of WASM. -/// -/// # Metadata -/// -/// First, you'll need to get hold of some metadata for the node you'd like to interact with. One -/// way to do this is by using the `subxt` CLI tool: -/// -/// ```bash -/// # Install the CLI tool: -/// cargo install subxt-cli -/// # Use it to download metadata (in this case, from a node running locally) -/// subxt metadata > polkadot_metadata.scale -/// ``` -/// -/// Run `subxt metadata --help` for more options. -/// -/// # Basic usage -/// -/// We can generate an interface to a chain given either: -/// - A locally saved SCALE encoded metadata file (see above) for that chain, -/// - The Runtime WASM for that chain, or -/// - A URL pointing at the JSON-RPC interface for a node on that chain. -/// -/// In each case, the `subxt` macro will use this data to populate the annotated module with all of the methods -/// and types required for interacting with the chain that the Runtime/metadata was loaded from. -/// -/// Let's look at each of these: -/// -/// ## Using a locally saved metadata file -/// -/// Annotate a Rust module with the `subxt` attribute referencing a metadata file like so: -/// -/// ```rust,no_run,standalone_crate -/// #[subxt::subxt( -/// runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale", -/// )] -/// mod polkadot {} -/// ``` -/// -/// ## Using a WASM runtime via `runtime_path = "..."` -/// -/// This requires the `runtime-wasm-path` feature flag. 
-/// -/// Annotate a Rust module with the `subxt` attribute referencing some runtime WASM like so: -/// -/// ```rust,ignore -/// #[subxt::subxt( -/// runtime_path = "../artifacts/westend_runtime.wasm", -/// )] -/// mod polkadot {} -/// ``` -/// -/// ## Connecting to a node to download metadata via `runtime_metadata_insecure_url = "..."` -/// -/// This will, at compile time, connect to the JSON-RPC interface for some node at the URL given, -/// download the metadata from it, and use that. This can be useful in CI, but is **not recommended** -/// in production code, because: -/// -/// - The compilation time is increased since we have to download metadata from a URL each time. If -/// the node we connect to is unresponsive, this will be slow or could fail. -/// - The metadata may change from what is expected without notice, causing compilation to fail if -/// it leads to changes in the generated interfaces that are being used. -/// - The node that you connect to could be malicious and provide incorrect metadata for the chain. -/// -/// ```rust,ignore -/// #[subxt::subxt( -/// runtime_metadata_insecure_url = "wss://rpc.polkadot.io:443" -/// )] -/// mod polkadot {} -/// ``` -/// -/// # Configuration -/// -/// This macro supports a number of attributes to configure what is generated: -/// -/// ## `crate = "..."` -/// -/// Use this attribute to specify a custom path to the `subxt_core` crate: -/// -/// ```rust,standalone_crate -/// # pub extern crate subxt_core; -/// # pub mod path { pub mod to { pub use subxt_core; } } -/// # fn main() {} -/// #[subxt::subxt( -/// runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale", -/// crate = "crate::path::to::subxt_core" -/// )] -/// mod polkadot {} -/// ``` -/// -/// This is useful if you write a library which uses this macro, but don't want to force users to depend on `subxt` -/// at the top level too. By default the path `::subxt` is used. 
-/// -/// ## `substitute_type(path = "...", with = "...")` -/// -/// This attribute replaces any reference to the generated type at the path given by `path` with a -/// reference to the path given by `with`. -/// -/// ```rust,standalone_crate -/// #[subxt::subxt( -/// runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale", -/// substitute_type(path = "sp_arithmetic::per_things::Perbill", with = "crate::Foo") -/// )] -/// mod polkadot {} -/// -/// # #[derive( -/// # scale_encode::EncodeAsType, -/// # scale_decode::DecodeAsType, -/// # codec::Encode, -/// # codec::Decode, -/// # Clone, -/// # Debug, -/// # )] -/// // In reality this needs some traits implementing on -/// // it to allow it to be used in place of Perbill: -/// pub struct Foo(u32); -/// # impl codec::CompactAs for Foo { -/// # type As = u32; -/// # fn encode_as(&self) -> &Self::As { -/// # &self.0 -/// # } -/// # fn decode_from(x: Self::As) -> Result { -/// # Ok(Foo(x)) -/// # } -/// # } -/// # impl From> for Foo { -/// # fn from(v: codec::Compact) -> Foo { -/// # v.0 -/// # } -/// # } -/// # fn main() {} -/// ``` -/// -/// If the type you're substituting contains generic parameters, you can "pattern match" on those, and -/// make use of them in the substituted type, like so: -/// -/// ```rust,no_run,standalone_crate -/// #[subxt::subxt( -/// runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale", -/// substitute_type( -/// path = "sp_runtime::multiaddress::MultiAddress", -/// with = "::subxt::utils::Static>" -/// ) -/// )] -/// mod polkadot {} -/// ``` -/// -/// The above is also an example of using the [`crate::utils::Static`] type to wrap some type which doesn't -/// on it's own implement [`scale_encode::EncodeAsType`] or [`scale_decode::DecodeAsType`], which are required traits -/// for any substitute type to implement by default. -/// -/// ## `derive_for_all_types = "..."` -/// -/// By default, all generated types derive a small set of traits. 
This attribute allows you to derive additional -/// traits on all generated types: -/// -/// ```rust,no_run,standalone_crate -/// #[subxt::subxt( -/// runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale", -/// derive_for_all_types = "Eq, PartialEq" -/// )] -/// mod polkadot {} -/// ``` -/// -/// Any substituted types (including the default substitutes) must also implement these traits in order to avoid errors -/// here. -/// -/// ## `derive_for_type(path = "...", derive = "...")` -/// -/// Unlike the above, which derives some trait on every generated type, this attribute allows you to derive traits only -/// for specific types. Note that any types which are used inside the specified type may also need to derive the same traits. -/// -/// ```rust,no_run,standalone_crate -/// #[subxt::subxt( -/// runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale", -/// derive_for_all_types = "Eq, PartialEq", -/// derive_for_type(path = "frame_support::PalletId", derive = "Ord, PartialOrd"), -/// derive_for_type(path = "sp_runtime::ModuleError", derive = "Hash"), -/// )] -/// mod polkadot {} -/// ``` -/// -/// ## `generate_docs` -/// -/// By default, documentation is not generated via the macro, since IDEs do not typically make use of it. This attribute -/// forces documentation to be generated, too. -/// -/// ```rust,no_run,standalone_crate -/// #[subxt::subxt( -/// runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale", -/// generate_docs -/// )] -/// mod polkadot {} -/// ``` -/// -/// ## `runtime_types_only` -/// -/// By default, the macro will generate various interfaces to make using Subxt simpler in addition with any types that need -/// generating to make this possible. This attribute makes the codegen only generate the types and not the Subxt interface. 
-/// -/// ```rust,no_run,standalone_crate -/// #[subxt::subxt( -/// runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale", -/// runtime_types_only -/// )] -/// mod polkadot {} -/// ``` -/// -/// ## `no_default_derives` -/// -/// By default, the macro will add all derives necessary for the generated code to play nicely with Subxt. Adding this attribute -/// removes all default derives. -/// -/// ```rust,no_run,standalone_crate -/// #[subxt::subxt( -/// runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale", -/// runtime_types_only, -/// no_default_derives, -/// derive_for_all_types="codec::Encode, codec::Decode" -/// )] -/// mod polkadot {} -/// ``` -/// -/// **Note**: At the moment, you must derive at least one of `codec::Encode` or `codec::Decode` or `scale_encode::EncodeAsType` or -/// `scale_decode::DecodeAsType` (because we add `#[codec(..)]` attributes on some fields/types during codegen), and you must use this -/// feature in conjunction with `runtime_types_only` (or manually specify a bunch of defaults to make codegen work properly when -/// generating the subxt interfaces). -/// -/// ## `unstable_metadata` -/// -/// This attribute works only in combination with `runtime_metadata_insecure_url`. By default, the macro will fetch the latest stable -/// version of the metadata from the target node. This attribute makes the codegen attempt to fetch the unstable version of -/// the metadata first. This is **not recommended** in production code, since the unstable metadata a node is providing is likely -/// to be incompatible with Subxt. 
-/// -/// ```rust,ignore -/// #[subxt::subxt( -/// runtime_metadata_insecure_url = "wss://rpc.polkadot.io:443", -/// unstable_metadata -/// )] -/// mod polkadot {} -/// ``` -pub use subxt_macro::subxt; diff --git a/new/src/utils/account_id.rs b/new/src/utils/account_id.rs deleted file mode 100644 index f459ae511d..0000000000 --- a/new/src/utils/account_id.rs +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! The "default" Substrate/Polkadot AccountId. This is used in codegen, as well as signing related bits. -//! This doesn't contain much functionality itself, but is easy to convert to/from an `sp_core::AccountId32` -//! for instance, to gain functionality without forcing a dependency on Substrate crates here. - -use codec::{Decode, Encode}; -use serde::{Deserialize, Serialize}; -use thiserror::Error as DeriveError; - -/// A 32-byte cryptographic identifier. This is a simplified version of Substrate's -/// `sp_core::crypto::AccountId32`. To obtain more functionality, convert this into -/// that type. -#[derive( - Clone, - Eq, - PartialEq, - Ord, - PartialOrd, - Encode, - Decode, - Debug, - scale_encode::EncodeAsType, - scale_decode::DecodeAsType, - scale_info::TypeInfo, -)] -pub struct AccountId32(pub [u8; 32]); - -impl AsRef<[u8]> for AccountId32 { - fn as_ref(&self) -> &[u8] { - &self.0[..] - } -} - -impl AsRef<[u8; 32]> for AccountId32 { - fn as_ref(&self) -> &[u8; 32] { - &self.0 - } -} - -impl From<[u8; 32]> for AccountId32 { - fn from(x: [u8; 32]) -> Self { - AccountId32(x) - } -} - -impl AccountId32 { - // Return the ss58-check string for this key. Adapted from `sp_core::crypto`. We need this to - // serialize our account appropriately but otherwise don't care. 
- fn to_ss58check(&self) -> String { - // For serializing to a string to obtain the account nonce, we use the default substrate - // prefix (since we have no way to otherwise pick one). It doesn't really matter, since when - // it's deserialized back in system_accountNextIndex, we ignore this (so long as it's valid). - const SUBSTRATE_SS58_PREFIX: u8 = 42; - // prefix <= 63 just take up one byte at the start: - let mut v = vec![SUBSTRATE_SS58_PREFIX]; - // then push the account ID bytes. - v.extend(self.0); - // then push a 2 byte checksum of what we have so far. - let r = ss58hash(&v); - v.extend(&r[0..2]); - // then encode to base58. - use base58::ToBase58; - v.to_base58() - } - - // This isn't strictly needed, but to give our AccountId32 a little more usefulness, we also - // implement the logic needed to decode an AccountId32 from an SS58 encoded string. This is exposed - // via a `FromStr` impl. - fn from_ss58check(s: &str) -> Result { - const CHECKSUM_LEN: usize = 2; - let body_len = 32; - - use base58::FromBase58; - let data = s.from_base58().map_err(|_| FromSs58Error::BadBase58)?; - if data.len() < 2 { - return Err(FromSs58Error::BadLength); - } - let prefix_len = match data[0] { - 0..=63 => 1, - 64..=127 => 2, - _ => return Err(FromSs58Error::InvalidPrefix), - }; - if data.len() != prefix_len + body_len + CHECKSUM_LEN { - return Err(FromSs58Error::BadLength); - } - let hash = ss58hash(&data[0..body_len + prefix_len]); - let checksum = &hash[0..CHECKSUM_LEN]; - if data[body_len + prefix_len..body_len + prefix_len + CHECKSUM_LEN] != *checksum { - // Invalid checksum. 
- return Err(FromSs58Error::InvalidChecksum); - } - - let result = data[prefix_len..body_len + prefix_len] - .try_into() - .map_err(|_| FromSs58Error::BadLength)?; - Ok(AccountId32(result)) - } -} - -/// An error obtained from trying to interpret an SS58 encoded string into an AccountId32 -#[derive(Clone, Copy, Eq, PartialEq, Debug, DeriveError)] -#[allow(missing_docs)] -pub enum FromSs58Error { - #[error("Base 58 requirement is violated")] - BadBase58, - #[error("Length is bad")] - BadLength, - #[error("Invalid checksum")] - InvalidChecksum, - #[error("Invalid SS58 prefix byte.")] - InvalidPrefix, -} - -// We do this just to get a checksum to help verify the validity of the address in to_ss58check -fn ss58hash(data: &[u8]) -> Vec { - use blake2::{Blake2b512, Digest}; - const PREFIX: &[u8] = b"SS58PRE"; - let mut ctx = Blake2b512::new(); - ctx.update(PREFIX); - ctx.update(data); - ctx.finalize().to_vec() -} - -impl Serialize for AccountId32 { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - serializer.serialize_str(&self.to_ss58check()) - } -} - -impl<'de> Deserialize<'de> for AccountId32 { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - AccountId32::from_ss58check(&String::deserialize(deserializer)?) 
- .map_err(|e| serde::de::Error::custom(format!("{e:?}"))) - } -} - -impl core::fmt::Display for AccountId32 { - fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { - write!(f, "{}", self.to_ss58check()) - } -} - -impl core::str::FromStr for AccountId32 { - type Err = FromSs58Error; - fn from_str(s: &str) -> Result { - AccountId32::from_ss58check(s) - } -} - -#[cfg(test)] -mod test { - use super::*; - use sp_core::{self, crypto::Ss58Codec}; - use sp_keyring::sr25519::Keyring; - - #[test] - fn ss58_is_compatible_with_substrate_impl() { - let keyrings = vec![Keyring::Alice, Keyring::Bob, Keyring::Charlie]; - - for keyring in keyrings { - let substrate_account = keyring.to_account_id(); - let local_account = AccountId32(substrate_account.clone().into()); - - // Both should encode to ss58 the same way: - let substrate_ss58 = substrate_account.to_ss58check(); - assert_eq!(substrate_ss58, local_account.to_ss58check()); - - // Both should decode from ss58 back to the same: - assert_eq!( - sp_core::crypto::AccountId32::from_ss58check(&substrate_ss58).unwrap(), - substrate_account - ); - assert_eq!( - AccountId32::from_ss58check(&substrate_ss58).unwrap(), - local_account - ); - } - } -} diff --git a/new/src/utils/multi_signature.rs b/new/src/utils/multi_signature.rs deleted file mode 100644 index 0f1c623a4f..0000000000 --- a/new/src/utils/multi_signature.rs +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! The "default" Substrate/Polkadot Signature type. This is used in codegen, as well as signing related bits. -//! This doesn't contain much functionality itself, but is easy to convert to/from an `sp_runtime::MultiSignature` -//! for instance, to gain functionality without forcing a dependency on Substrate crates here. - -use codec::{Decode, Encode}; - -/// Signature container that can store known signature types. 
This is a simplified version of -/// `sp_runtime::MultiSignature`. To obtain more functionality, convert this into that type. -#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, Debug, scale_info::TypeInfo)] -pub enum MultiSignature { - /// An Ed25519 signature. - Ed25519([u8; 64]), - /// An Sr25519 signature. - Sr25519([u8; 64]), - /// An ECDSA/SECP256k1 signature (a 512-bit value, plus 8 bits for recovery ID). - Ecdsa([u8; 65]), -} diff --git a/new/src/utils/yesnomaybe.rs b/new/src/utils/yesnomaybe.rs deleted file mode 100644 index 18a878d942..0000000000 --- a/new/src/utils/yesnomaybe.rs +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -/// A unit marker enum. -pub enum Yes {} -/// A unit marker enum. -pub enum Maybe {} -/// A unit marker enum. -pub enum No {} - -/// This is implemented for [`Yes`] and [`No`] and -/// allows us to check at runtime which of these types is present. -pub trait YesNo { - /// [`Yes`] - fn is_yes() -> bool { - false - } - /// [`No`] - fn is_no() -> bool { - false - } -} - -impl YesNo for Yes { - fn is_yes() -> bool { - true - } -} -impl YesNo for No { - fn is_no() -> bool { - true - } -} - -/// This is implemented for [`Yes`] and [`Maybe`] and -/// allows us to check at runtime which of these types is present. -pub trait YesMaybe { - /// [`Yes`] - fn is_yes() -> bool { - false - } - /// [`Maybe`] - fn is_maybe() -> bool { - false - } -} - -impl YesMaybe for Yes { - fn is_yes() -> bool { - true - } -} -impl YesMaybe for Maybe { - fn is_maybe() -> bool { - true - } -} - -/// This is implemented for [`No`] and [`Maybe`] and -/// allows us to check at runtime which of these types is present. 
-pub trait NoMaybe { - /// [`No`] - fn is_no() -> bool { - false - } - /// [`Maybe`] - fn is_maybe() -> bool { - false - } -} - -impl NoMaybe for No { - fn is_no() -> bool { - true - } -} -impl NoMaybe for Maybe { - fn is_maybe() -> bool { - true - } -} diff --git a/rpcs/Cargo.toml b/rpcs/Cargo.toml index e18e1db71e..9a7cb75c45 100644 --- a/rpcs/Cargo.toml +++ b/rpcs/Cargo.toml @@ -16,8 +16,6 @@ keywords = ["parity", "subxt", "rpcs"] [features] default = ["jsonrpsee", "native"] - -subxt = ["dep:subxt-core"] jsonrpsee = ["dep:jsonrpsee", "dep:tokio-util"] unstable-light-client = [ @@ -83,9 +81,6 @@ tokio = { workspace = true, optional = true } # Included with the lightclient feature subxt-lightclient = { workspace = true, optional = true, default-features = false } -# Included with the subxt-core feature to impl Config for RpcConfig -subxt-core = { workspace = true, optional = true } - # Included with WASM feature wasm-bindgen-futures = { workspace = true, optional = true } diff --git a/rpcs/src/lib.rs b/rpcs/src/lib.rs index dea37552f1..b8a37d7a7a 100644 --- a/rpcs/src/lib.rs +++ b/rpcs/src/lib.rs @@ -16,8 +16,6 @@ //! The provided RPC client implementations can be used natively (with the default `native` feature //! flag) or in WASM based web apps (with the `web` feature flag). -#![cfg_attr(docsrs, feature(doc_cfg))] - #[cfg(any( all(feature = "web", feature = "native"), not(any(feature = "web", feature = "native")) @@ -62,23 +60,6 @@ impl Hash for T where T: serde::de::DeserializeOwned + serde::Serialize {} pub trait AccountId: serde::Serialize {} impl AccountId for T where T: serde::Serialize {} -// When the subxt feature is enabled, ensure that any valid `subxt::Config` -// is also a valid `RpcConfig`. 
-#[cfg(feature = "subxt")] -mod impl_config { - use super::*; - use subxt_core::config::HashFor; - - impl RpcConfig for T - where - T: subxt_core::Config, - { - type Header = T::Header; - type Hash = HashFor; - type AccountId = T::AccountId; - } -} - /// This encapsulates any errors that could be emitted in this crate. #[derive(Debug, thiserror::Error)] #[non_exhaustive] diff --git a/signer/Cargo.toml b/signer/Cargo.toml index 867c785eab..7c3dc61cc1 100644 --- a/signer/Cargo.toml +++ b/signer/Cargo.toml @@ -15,7 +15,7 @@ description = "Sign extrinsics to be submitted by Subxt" keywords = ["parity", "subxt", "extrinsic", "signer"] [features] -default = ["sr25519", "ecdsa", "subxt", "std"] +default = ["sr25519", "ecdsa", "std"] std = [ "regex/std", "pbkdf2/std", @@ -40,18 +40,25 @@ ecdsa = ["secp256k1"] unstable-eth = ["keccak-hash", "ecdsa", "secp256k1", "bip32"] # Enable support for loading key pairs from polkadot-js json. -polkadot-js-compat = ["std", "subxt", "sr25519", "base64", "scrypt", "crypto_secretbox", "serde", "serde_json"] +polkadot-js-compat = [ + "std", + "sr25519", + "base64", + "scrypt", + "crypto_secretbox", + "serde", + "serde_json" +] # Make the keypair algorithms here compatible with Subxt's Signer trait, # so that they can be used to sign transactions for compatible chains. -subxt = ["dep:subxt-core"] +subxt = ["dep:subxt"] # The getrandom package is used via schnorrkel. We need to enable the JS # feature on it if compiling for the web. web = ["getrandom/js"] [dependencies] -subxt-core = { workspace = true, optional = true, default-features = false } secrecy = { workspace = true } regex = { workspace = true, features = ["unicode"] } hex = { workspace = true } @@ -74,12 +81,16 @@ secp256k1 = { workspace = true, optional = true, features = [ keccak-hash = { workspace = true, optional = true } thiserror = { workspace = true, default-features = false } +# Pulled in if the subxt features is enabled. 
+subxt = { workspace = true, optional = true } + # These are used if the polkadot-js-compat feature is enabled serde = { workspace = true, optional = true } serde_json = { workspace = true, optional = true } base64 = { workspace = true, optional = true, features = ["alloc"] } scrypt = { workspace = true, default-features = false, optional = true } crypto_secretbox = { workspace = true, optional = true, features = ["alloc", "salsa20"] } +subxt-utils-accountid32 = { workspace = true, optional = true } # We only pull this in to enable the JS flag for schnorrkel to use. getrandom = { workspace = true, optional = true } diff --git a/signer/src/ecdsa.rs b/signer/src/ecdsa.rs index fde7ef9828..1186f44365 100644 --- a/signer/src/ecdsa.rs +++ b/signer/src/ecdsa.rs @@ -297,10 +297,9 @@ pub mod dev { #[cfg(feature = "subxt")] mod subxt_compat { use super::*; - - use subxt_core::config::Config; - use subxt_core::tx::signer::Signer as SignerT; - use subxt_core::utils::{AccountId32, MultiAddress, MultiSignature}; + use subxt::config::Config; + use subxt::transactions::Signer as SignerT; + use subxt::utils::{AccountId32, MultiAddress, MultiSignature}; impl From for MultiSignature { fn from(value: Signature) -> Self { diff --git a/signer/src/eth.rs b/signer/src/eth.rs index 1d31e4cde5..9a152ebc8c 100644 --- a/signer/src/eth.rs +++ b/signer/src/eth.rs @@ -309,10 +309,10 @@ pub mod dev { #[cfg(feature = "subxt")] mod subxt_compat { use super::*; - use subxt_core::config::Config; - use subxt_core::tx::signer::Signer as SignerT; - use subxt_core::utils::AccountId20; - use subxt_core::utils::MultiAddress; + use subxt::config::Config; + use subxt::transactions::Signer as SignerT; + use subxt::utils::AccountId20; + use subxt::utils::MultiAddress; impl SignerT for Keypair where @@ -365,21 +365,22 @@ mod test { use bip39::Mnemonic; use proptest::prelude::*; use secp256k1::Secp256k1; - use subxt_core::utils::AccountId20; - - use subxt_core::{config::*, tx::signer::Signer as SignerT}; + use 
subxt::utils::AccountId20; + use subxt::transactions::Signer as SignerT; + use subxt::config::{Config, HashFor, substrate}; use super::*; - enum StubEthRuntimeConfig {} + #[derive(Debug, Clone)] + struct StubEthRuntimeConfig; impl Config for StubEthRuntimeConfig { type AccountId = AccountId20; type Address = AccountId20; type Signature = Signature; type Hasher = substrate::BlakeTwo256; - type Header = substrate::SubstrateHeader; - type ExtrinsicParams = SubstrateExtrinsicParams; + type Header = substrate::SubstrateHeader>; + type ExtrinsicParams = substrate::SubstrateExtrinsicParams; type AssetId = u32; } diff --git a/signer/src/lib.rs b/signer/src/lib.rs index 7ceaf87586..2f2a2c4831 100644 --- a/signer/src/lib.rs +++ b/signer/src/lib.rs @@ -13,7 +13,6 @@ //! Enable the `subxt` feature to enable use of this [`sr25519::Keypair`] in signing //! subxt transactions for chains supporting sr25519 signatures. -#![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(not(feature = "std"), no_std)] extern crate alloc; @@ -24,22 +23,18 @@ mod crypto; // An sr25519 key pair implementation. #[cfg(feature = "sr25519")] -#[cfg_attr(docsrs, doc(cfg(feature = "sr25519")))] pub mod sr25519; // An ecdsa key pair implementation. #[cfg(feature = "ecdsa")] -#[cfg_attr(docsrs, doc(cfg(feature = "ecdsa")))] pub mod ecdsa; // An ethereum signer implementation. #[cfg(feature = "unstable-eth")] -#[cfg_attr(docsrs, doc(cfg(feature = "unstable-eth")))] pub mod eth; /// A polkadot-js account json loader. 
#[cfg(feature = "polkadot-js-compat")] -#[cfg_attr(docsrs, doc(cfg(feature = "polkadot-js-compat")))] pub mod polkadot_js_compat; // Re-export useful bits and pieces for generating a Pair from a phrase, diff --git a/signer/src/polkadot_js_compat.rs b/signer/src/polkadot_js_compat.rs index c2acc24e86..80aff519f7 100644 --- a/signer/src/polkadot_js_compat.rs +++ b/signer/src/polkadot_js_compat.rs @@ -76,6 +76,9 @@ struct KeyringPairJson { address: AccountId32, } +// Re-export this type which is used above. +pub use subxt_utils_accountid32::AccountId32; + // This can be removed once split_array is stabilized. fn slice_to_u32(slice: &[u8]) -> u32 { u32::from_le_bytes(slice.try_into().expect("Slice should be 4 bytes.")) diff --git a/signer/src/sr25519.rs b/signer/src/sr25519.rs index d6acef9f3c..f00c1bde93 100644 --- a/signer/src/sr25519.rs +++ b/signer/src/sr25519.rs @@ -287,15 +287,11 @@ pub mod dev { // Make `Keypair` usable to sign transactions in Subxt. This is optional so that // `subxt-signer` can be used entirely independently of Subxt. #[cfg(feature = "subxt")] -#[cfg_attr(docsrs, doc(cfg(feature = "subxt")))] mod subxt_compat { use super::*; - - use subxt_core::{ - Config, - tx::signer::Signer as SignerT, - utils::{AccountId32, MultiAddress, MultiSignature}, - }; + use subxt::Config; + use subxt::transactions::Signer as SignerT; + use subxt::utils::{AccountId32, MultiAddress, MultiSignature}; impl From for MultiSignature { fn from(value: Signature) -> Self { diff --git a/subxt/Cargo.toml b/subxt/Cargo.toml index 72e44a46d8..f31f70ab67 100644 --- a/subxt/Cargo.toml +++ b/subxt/Cargo.toml @@ -22,6 +22,14 @@ workspace = true # it's recommended to use `--no-default-features` and then select what you need. default = ["jsonrpsee", "native"] +# Features that we expect to be enabled for documentation. +docs = [ + "default", + "unstable-light-client", + "runtime", + "reconnecting-rpc-client", +] + # Enable this for native (ie non web/wasm builds). 
# Exactly 1 of "web" and "native" is expected. native = [ @@ -75,21 +83,27 @@ runtime-wasm-path = ["subxt-macro/runtime-wasm-path"] [dependencies] async-trait = { workspace = true } +base58 = { workspace = true } +blake2 = { workspace = true } codec = { package = "parity-scale-codec", workspace = true, features = ["derive"] } derive-where = { workspace = true } scale-info = { workspace = true, features = ["default"] } +scale-info-legacy = { workspace = true } scale-value = { workspace = true, features = ["default"] } scale-bits = { workspace = true, features = ["default"] } scale-decode = { workspace = true, features = ["default"] } scale-encode = { workspace = true, features = ["default"] } futures = { workspace = true } hex = { workspace = true } +impl-serde = { workspace = true, default-features = false } +keccak-hash = { workspace = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true, features = ["default", "raw_value"] } sp-crypto-hashing = { workspace = true } thiserror = { workspace = true } tracing = { workspace = true } frame-metadata = { workspace = true } +frame-decode = { workspace = true, features = ["legacy-types"] } either = { workspace = true } web-time = { workspace = true } @@ -101,10 +115,10 @@ jsonrpsee = { workspace = true, optional = true, features = ["jsonrpsee-types"] # Other subxt crates we depend on. 
subxt-macro = { workspace = true } -subxt-core = { workspace = true, features = ["std"] } -subxt-metadata = { workspace = true, features = ["std"] } +subxt-metadata = { workspace = true, features = ["std", "legacy"] } subxt-lightclient = { workspace = true, optional = true, default-features = false } -subxt-rpcs = { workspace = true, features = ["subxt"] } +subxt-rpcs = { workspace = true } +subxt-utils-accountid32 = { workspace = true } # For parsing urls to disallow insecure schemes url = { workspace = true } @@ -127,8 +141,8 @@ sp-core = { workspace = true, features = ["std"] } sp-keyring = { workspace = true, features = ["std"] } sp-runtime = { workspace = true, features = ["std"] } assert_matches = { workspace = true } -subxt-signer = { path = "../signer", features = ["unstable-eth"] } -subxt-rpcs = { workspace = true, features = ["subxt", "mock-rpc-client"] } +subxt-signer = { workspace = true, features = ["unstable-eth", "subxt", "sr25519"] } +subxt-rpcs = { workspace = true, features = ["mock-rpc-client"] } # Tracing subscriber is useful for light-client examples to ensure that # the `bootNodes` and chain spec are configured correctly. 
If all is fine, then # the light-client will emit INFO logs with @@ -140,23 +154,23 @@ tower = { workspace = true } hyper = { workspace = true } http-body = { workspace = true } -[[example]] -name = "light_client_basic" -path = "examples/light_client_basic.rs" -required-features = ["unstable-light-client", "jsonrpsee"] - -[[example]] -name = "light_client_local_node" -path = "examples/light_client_local_node.rs" -required-features = ["unstable-light-client", "jsonrpsee", "native"] - -[[example]] -name = "setup_reconnecting_rpc_client" -path = "examples/setup_reconnecting_rpc_client.rs" -required-features = ["reconnecting-rpc-client"] +# [[example]] +# name = "light_client_basic" +# path = "examples/light_client_basic.rs" +# required-features = ["unstable-light-client", "jsonrpsee"] +# +# [[example]] +# name = "light_client_local_node" +# path = "examples/light_client_local_node.rs" +# required-features = ["unstable-light-client", "jsonrpsee", "native"] +# +# [[example]] +# name = "setup_reconnecting_rpc_client" +# path = "examples/setup_reconnecting_rpc_client.rs" +# required-features = ["reconnecting-rpc-client"] [package.metadata.docs.rs] -features = ["default", "unstable-light-client"] +features = ["docs"] [package.metadata.playground] features = ["default", "unstable-light-client"] diff --git a/subxt/examples/block_decoding_dynamic.rs b/subxt/examples/block_decoding_dynamic.rs deleted file mode 100644 index 44ba483221..0000000000 --- a/subxt/examples/block_decoding_dynamic.rs +++ /dev/null @@ -1,43 +0,0 @@ -#![allow(missing_docs)] -use subxt::{OnlineClient, PolkadotConfig}; - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a client that subscribes to blocks of the Polkadot network. 
- let api = OnlineClient::::from_url("wss://rpc.polkadot.io:443").await?; - - // Subscribe to all finalized blocks: - let mut blocks_sub = api.blocks().subscribe_finalized().await?; - while let Some(block) = blocks_sub.next().await { - let block = block?; - let block_number = block.header().number; - let block_hash = block.hash(); - println!("Block #{block_number} ({block_hash})"); - - // Decode each signed extrinsic in the block dynamically - let extrinsics = block.extrinsics().await?; - for ext in extrinsics.iter() { - let Some(transaction_extensions) = ext.transaction_extensions() else { - continue; // we do not look at inherents in this example - }; - - // Decode the fields into our dynamic Value type to display: - let fields = ext.decode_as_fields::()?; - - println!(" {}/{}", ext.pallet_name(), ext.call_name()); - println!(" Transaction Extensions:"); - for signed_ext in transaction_extensions.iter() { - // We only want to take a look at these 3 signed extensions, because the others all just have unit fields. - if ["CheckMortality", "CheckNonce", "ChargeTransactionPayment"] - .contains(&signed_ext.name()) - { - println!(" {}: {}", signed_ext.name(), signed_ext.value()?); - } - } - println!(" Fields:"); - println!(" {fields}\n"); - } - } - - Ok(()) -} diff --git a/subxt/examples/block_decoding_static.rs b/subxt/examples/block_decoding_static.rs deleted file mode 100644 index 9af696bab4..0000000000 --- a/subxt/examples/block_decoding_static.rs +++ /dev/null @@ -1,64 +0,0 @@ -#![allow(missing_docs)] -use subxt::{ - OnlineClient, PolkadotConfig, - utils::{AccountId32, MultiAddress}, -}; - -use codec::Decode; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -use polkadot::balances::calls::types::TransferKeepAlive; - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a client that subscribes to blocks of the Polkadot network. 
- let api = OnlineClient::::from_url("wss://rpc.polkadot.io:443").await?; - - // Subscribe to all finalized blocks: - let mut blocks_sub = api.blocks().subscribe_finalized().await?; - - // For each block, print details about the `TransferKeepAlive` transactions we are interested in. - while let Some(block) = blocks_sub.next().await { - let block = block?; - let block_number = block.header().number; - let block_hash = block.hash(); - println!("Block #{block_number} ({block_hash}):"); - - let extrinsics = block.extrinsics().await?; - for transfer in extrinsics.find::() { - let transfer = transfer?; - - let Some(extensions) = transfer.details.transaction_extensions() else { - panic!("TransferKeepAlive should be signed") - }; - - let addr_bytes = transfer - .details - .address_bytes() - .expect("TransferKeepAlive should be signed"); - let sender = MultiAddress::::decode(&mut &addr_bytes[..]) - .expect("Decoding should work"); - let sender = display_address(&sender); - let receiver = display_address(&transfer.value.dest); - let value = transfer.value.value; - let tip = extensions.tip().expect("Should have tip"); - let nonce = extensions.nonce().expect("Should have nonce"); - - println!( - " Transfer of {value} DOT:\n {sender} (Tip: {tip}, Nonce: {nonce}) ---> {receiver}", - ); - } - } - - Ok(()) -} - -fn display_address(addr: &MultiAddress) -> String { - if let MultiAddress::Id(id32) = addr { - format!("{id32}") - } else { - "MultiAddress::...".into() - } -} diff --git a/subxt/examples/blocks_subscribing.rs b/subxt/examples/blocks_subscribing.rs deleted file mode 100644 index f0f0a37d43..0000000000 --- a/subxt/examples/blocks_subscribing.rs +++ /dev/null @@ -1,63 +0,0 @@ -#![allow(missing_docs)] -use subxt::{OnlineClient, PolkadotConfig}; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a client to use: - let api = 
OnlineClient::::new().await?; - - // Subscribe to all finalized blocks: - let mut blocks_sub = api.blocks().subscribe_finalized().await?; - - // For each block, print a bunch of information about it: - while let Some(block) = blocks_sub.next().await { - let block = block?; - - let block_number = block.header().number; - let block_hash = block.hash(); - - println!("Block #{block_number}:"); - println!(" Hash: {block_hash}"); - println!(" Extrinsics:"); - - // Log each of the extrinsic with it's associated events: - let extrinsics = block.extrinsics().await?; - for ext in extrinsics.iter() { - let idx = ext.index(); - let events = ext.events().await?; - let bytes_hex = format!("0x{}", hex::encode(ext.bytes())); - - // See the API docs for more ways to decode extrinsics: - let decoded_ext = ext.as_root_extrinsic::(); - - println!(" Extrinsic #{idx}:"); - println!(" Bytes: {bytes_hex}"); - println!(" Decoded: {decoded_ext:?}"); - - println!(" Events:"); - for evt in events.iter() { - let evt = evt?; - let pallet_name = evt.pallet_name(); - let event_name = evt.variant_name(); - let event_values = evt.decode_as_fields::()?; - - println!(" {pallet_name}_{event_name}"); - println!(" {event_values}"); - } - - println!(" Transaction Extensions:"); - if let Some(transaction_extensions) = ext.transaction_extensions() { - for transaction_extension in transaction_extensions.iter() { - let name = transaction_extension.name(); - let value = transaction_extension.value()?.to_string(); - println!(" {name}: {value}"); - } - } - } - } - - Ok(()) -} diff --git a/subxt/examples/constants_dynamic.rs b/subxt/examples/constants_dynamic.rs deleted file mode 100644 index 2d4ed4c5d7..0000000000 --- a/subxt/examples/constants_dynamic.rs +++ /dev/null @@ -1,26 +0,0 @@ -#![allow(missing_docs)] -use subxt::dynamic::Value; -use subxt::{OnlineClient, PolkadotConfig}; - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a client to use: - let api = OnlineClient::::new().await?; - - 
// We can query a constant by providing a tuple of the pallet and constant name. The return type - // will be `Value` if we pass this query: - let constant_query = ("System", "BlockLength"); - let _value = api.constants().at(&constant_query)?; - - // Or we can use the library function to query a constant, which allows us to pass a generic type - // that Subxt will attempt to decode the constant into: - let constant_query = subxt::dynamic::constant::("System", "BlockLength"); - let value = api.constants().at(&constant_query)?; - - // Or we can obtain the bytes for the constant, using either form of query. - let bytes = api.constants().bytes_at(&constant_query)?; - - println!("Constant bytes: {:?}", bytes); - println!("Constant value: {}", value); - Ok(()) -} diff --git a/subxt/examples/constants_static.rs b/subxt/examples/constants_static.rs deleted file mode 100644 index 2bb1aecbf6..0000000000 --- a/subxt/examples/constants_static.rs +++ /dev/null @@ -1,24 +0,0 @@ -#![allow(missing_docs)] -use subxt::{OnlineClient, PolkadotConfig}; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a client to use: - let api = OnlineClient::::new().await?; - - // A query to obtain some constant: - let constant_query = polkadot::constants().system().block_length(); - - // Obtain the value: - let value = api.constants().at(&constant_query)?; - - // Or obtain the bytes: - let bytes = api.constants().bytes_at(&constant_query)?; - - println!("Encoded block length: {bytes:?}"); - println!("Block length: {value:?}"); - Ok(()) -} diff --git a/subxt/examples/events.rs b/subxt/examples/events.rs deleted file mode 100644 index 9861c9238e..0000000000 --- a/subxt/examples/events.rs +++ /dev/null @@ -1,48 +0,0 @@ -#![allow(missing_docs)] -use subxt::{OnlineClient, PolkadotConfig}; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] 
-pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a client to use: - let api = OnlineClient::::new().await?; - - // Get events for the latest block: - let events = api.events().at_latest().await?; - - // We can dynamically decode events: - println!("Dynamic event details:"); - for event in events.iter() { - let event = event?; - - let pallet = event.pallet_name(); - let variant = event.variant_name(); - let field_values = event.decode_as_fields::()?; - - println!("{pallet}::{variant}: {field_values}"); - } - - // Or we can attempt to statically decode them into the root Event type: - println!("Static event details:"); - for event in events.iter() { - let event = event?; - - if let Ok(ev) = event.as_root_event::() { - println!("{ev:?}"); - } else { - println!(""); - } - } - - // Or we can look for specific events which match our statically defined ones: - let transfer_event = events.find_first::()?; - if let Some(ev) = transfer_event { - println!(" - Balance transfer success: value: {:?}", ev.amount); - } else { - println!(" - No balance transfer event found in this block"); - } - - Ok(()) -} diff --git a/subxt/examples/light_client_basic.rs b/subxt/examples/light_client_basic.rs deleted file mode 100644 index 397de2a255..0000000000 --- a/subxt/examples/light_client_basic.rs +++ /dev/null @@ -1,47 +0,0 @@ -#![allow(missing_docs)] -use futures::StreamExt; -use subxt::{PolkadotConfig, client::OnlineClient, lightclient::LightClient}; - -// Generate an interface that we can use from the node's metadata. 
-#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -const POLKADOT_SPEC: &str = include_str!("../../artifacts/demo_chain_specs/polkadot.json"); -const ASSET_HUB_SPEC: &str = - include_str!("../../artifacts/demo_chain_specs/polkadot_asset_hub.json"); - -#[tokio::main] -async fn main() -> Result<(), Box> { - // The lightclient logs are informative: - tracing_subscriber::fmt::init(); - - // Instantiate a light client with the Polkadot relay chain, - // and connect it to Asset Hub, too. - let (lightclient, polkadot_rpc) = LightClient::relay_chain(POLKADOT_SPEC)?; - let asset_hub_rpc = lightclient.parachain(ASSET_HUB_SPEC)?; - - // Create Subxt clients from these Smoldot backed RPC clients. - let polkadot_api = OnlineClient::::from_rpc_client(polkadot_rpc).await?; - let asset_hub_api = OnlineClient::::from_rpc_client(asset_hub_rpc).await?; - - // Use them! - let polkadot_sub = polkadot_api - .blocks() - .subscribe_finalized() - .await? - .map(|block| ("Polkadot", block)); - let parachain_sub = asset_hub_api - .blocks() - .subscribe_finalized() - .await? - .map(|block| ("AssetHub", block)); - - let mut stream_combinator = futures::stream::select(polkadot_sub, parachain_sub); - - while let Some((chain, block)) = stream_combinator.next().await { - let block = block?; - println!(" Chain {:?} hash={:?}", chain, block.hash()); - } - - Ok(()) -} diff --git a/subxt/examples/light_client_local_node.rs b/subxt/examples/light_client_local_node.rs deleted file mode 100644 index 68012b8551..0000000000 --- a/subxt/examples/light_client_local_node.rs +++ /dev/null @@ -1,58 +0,0 @@ -#![allow(missing_docs)] -use subxt::utils::fetch_chainspec_from_rpc_node; -use subxt::{ - PolkadotConfig, - client::OnlineClient, - lightclient::{ChainConfig, LightClient}, -}; -use subxt_signer::sr25519::dev; - -// Generate an interface that we can use from the node's metadata. 
-#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // The smoldot logs are informative: - tracing_subscriber::fmt::init(); - - // Use a utility function to obtain a chain spec from a locally running node: - let chain_spec = fetch_chainspec_from_rpc_node("ws://127.0.0.1:9944").await?; - - // Configure the bootnodes of this chain spec. In this case, because we start one - // single node, the bootnodes must be overwritten for the light client to connect - // to the local node. - // - // The `12D3KooWEyoppNCUx8Yx66oV9fJnriXwCcXwDDUA2kj6vnc6iDEp` is the P2P address - // from a local polkadot node starting with - // `--node-key 0000000000000000000000000000000000000000000000000000000000000001` - let chain_config = ChainConfig::chain_spec(chain_spec.get()).set_bootnodes([ - "/ip4/127.0.0.1/tcp/30333/p2p/12D3KooWEyoppNCUx8Yx66oV9fJnriXwCcXwDDUA2kj6vnc6iDEp", - ])?; - - // Start the light client up, establishing a connection to the local node. - let (_light_client, chain_rpc) = LightClient::relay_chain(chain_config)?; - let api = OnlineClient::::from_rpc_client(chain_rpc).await?; - - // Build a balance transfer extrinsic. - let dest = dev::bob().public_key().into(); - let balance_transfer_tx = polkadot::tx().balances().transfer_allow_death(dest, 10_000); - - // Submit the balance transfer extrinsic from Alice, and wait for it to be successful - // and in a finalized block. We get back the extrinsic events if all is well. - let from = dev::alice(); - let events = api - .tx() - .sign_and_submit_then_watch_default(&balance_transfer_tx, &from) - .await? - .wait_for_finalized_success() - .await?; - - // Find a Transfer event and print it. 
- let transfer_event = events.find_first::()?; - if let Some(event) = transfer_event { - println!("Balance transfer success: {event:?}"); - } - - Ok(()) -} diff --git a/subxt/examples/rpc_legacy.rs b/subxt/examples/rpc_legacy.rs deleted file mode 100644 index a21afd8f97..0000000000 --- a/subxt/examples/rpc_legacy.rs +++ /dev/null @@ -1,61 +0,0 @@ -#![allow(missing_docs)] -use subxt::backend::{legacy::LegacyRpcMethods, rpc::RpcClient}; -use subxt::config::DefaultExtrinsicParamsBuilder as Params; -use subxt::{OnlineClient, PolkadotConfig}; -use subxt_signer::sr25519::dev; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // First, create a raw RPC client: - let rpc_client = RpcClient::from_url("ws://127.0.0.1:9944").await?; - - // Use this to construct our RPC methods: - let rpc = LegacyRpcMethods::::new(rpc_client.clone()); - - // We can use the same client to drive our full Subxt interface too: - let api = OnlineClient::::from_rpc_client(rpc_client.clone()).await?; - - // Now, we can make some RPC calls using some legacy RPC methods. - println!( - "📛 System Name: {:?}\n🩺 Health: {:?}\n🖫 Properties: {:?}\n🔗 Chain: {:?}\n", - rpc.system_name().await?, - rpc.system_health().await?, - rpc.system_properties().await?, - rpc.system_chain().await? 
- ); - - // We can also interleave RPC calls and using the full Subxt client, here to submit multiple - // transactions using the legacy `system_account_next_index` RPC call, which returns a nonce - // that is adjusted for any transactions already in the pool: - - let alice = dev::alice(); - let bob = dev::bob(); - - loop { - let current_nonce = rpc - .system_account_next_index(&alice.public_key().into()) - .await?; - - let ext_params = Params::new().mortal(8).nonce(current_nonce).build(); - - let balance_transfer = polkadot::tx() - .balances() - .transfer_allow_death(bob.public_key().into(), 1_000_000); - - let ext_hash = api - .tx() - .create_partial_offline(&balance_transfer, ext_params)? - .sign(&alice) - .submit() - .await?; - - println!("Submitted ext {ext_hash} with nonce {current_nonce}"); - - // Sleep less than block time, but long enough to ensure - // not all transactions end up in the same block. - tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; - } -} diff --git a/subxt/examples/runtime_apis_dynamic.rs b/subxt/examples/runtime_apis_dynamic.rs deleted file mode 100644 index ef9c4ac071..0000000000 --- a/subxt/examples/runtime_apis_dynamic.rs +++ /dev/null @@ -1,30 +0,0 @@ -#![allow(missing_docs)] -use subxt::utils::AccountId32; -use subxt::{OnlineClient, config::PolkadotConfig}; -use subxt_signer::sr25519::dev; - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a client to use: - let api = OnlineClient::::new().await?; - - // Create a "dynamic" runtime API payload that calls the - // `AccountNonceApi_account_nonce` function. We could use the - // `scale_value::Value` type as output, and a vec of those as inputs, - // but since we know the input + return types we can pass them directly. - // There is one input argument, so the inputs are a tuple of one element. 
- let account: AccountId32 = dev::alice().public_key().into(); - let runtime_api_call = - subxt::dynamic::runtime_api_call::<_, u64>("AccountNonceApi", "account_nonce", (account,)); - - // Submit the call to get back a result. - let nonce = api - .runtime_api() - .at_latest() - .await? - .call(runtime_api_call) - .await?; - - println!("Account nonce: {:#?}", nonce); - Ok(()) -} diff --git a/subxt/examples/runtime_apis_raw.rs b/subxt/examples/runtime_apis_raw.rs deleted file mode 100644 index 45b5eecc50..0000000000 --- a/subxt/examples/runtime_apis_raw.rs +++ /dev/null @@ -1,23 +0,0 @@ -#![allow(missing_docs)] -use subxt::ext::codec::{Compact, Decode}; -use subxt::ext::frame_metadata::RuntimeMetadataPrefixed; -use subxt::{OnlineClient, PolkadotConfig}; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a client to use: - let api = OnlineClient::::new().await?; - - // Use runtime APIs at the latest block: - let runtime_apis = api.runtime_api().at_latest().await?; - - // Ask for metadata and decode it: - let result_bytes = runtime_apis.call_raw("Metadata_metadata", None).await?; - let (_, meta): (Compact, RuntimeMetadataPrefixed) = Decode::decode(&mut &*result_bytes)?; - - println!("{meta:?}"); - Ok(()) -} diff --git a/subxt/examples/runtime_apis_static.rs b/subxt/examples/runtime_apis_static.rs deleted file mode 100644 index 95228668e6..0000000000 --- a/subxt/examples/runtime_apis_static.rs +++ /dev/null @@ -1,28 +0,0 @@ -#![allow(missing_docs)] -use subxt::{OnlineClient, config::PolkadotConfig}; -use subxt_signer::sr25519::dev; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a client to use: - let api = OnlineClient::::new().await?; - - // Create a runtime API payload that calls into - // 
`AccountNonceApi_account_nonce` function. - let account = dev::alice().public_key().into(); - let runtime_api_call = polkadot::apis().account_nonce_api().account_nonce(account); - - // Submit the call and get back a result. - let nonce = api - .runtime_api() - .at_latest() - .await? - .call(runtime_api_call) - .await; - - println!("AccountNonceApi_account_nonce for Alice: {nonce:?}"); - Ok(()) -} diff --git a/subxt/examples/setup_client_custom_rpc.rs b/subxt/examples/setup_client_custom_rpc.rs deleted file mode 100644 index 47580ba32a..0000000000 --- a/subxt/examples/setup_client_custom_rpc.rs +++ /dev/null @@ -1,86 +0,0 @@ -#![allow(missing_docs)] -use std::{ - fmt::Write, - pin::Pin, - sync::{Arc, Mutex}, -}; -use subxt::{ - OnlineClient, PolkadotConfig, - backend::rpc::{RawRpcFuture, RawRpcSubscription, RawValue, RpcClient, RpcClientT}, -}; - -// A dummy RPC client that doesn't actually handle requests properly -// at all, but instead just logs what requests to it were made. -struct MyLoggingClient { - log: Arc>, -} - -// We have to implement this fairly low level trait to turn [`MyLoggingClient`] -// into an RPC client that we can make use of in Subxt. Here we just log the requests -// made but don't forward them to any real node, and instead just return nonsense. -impl RpcClientT for MyLoggingClient { - fn request_raw<'a>( - &'a self, - method: &'a str, - params: Option>, - ) -> RawRpcFuture<'a, Box> { - writeln!( - self.log.lock().unwrap(), - "{method}({})", - params.as_ref().map(|p| p.get()).unwrap_or("[]") - ) - .unwrap(); - - // We've logged the request; just return garbage. Because a boxed future is returned, - // you're able to run whatever async code you'd need to actually talk to a node. 
- let res = RawValue::from_string("[]".to_string()).unwrap(); - Box::pin(std::future::ready(Ok(res))) - } - - fn subscribe_raw<'a>( - &'a self, - sub: &'a str, - params: Option>, - unsub: &'a str, - ) -> RawRpcFuture<'a, RawRpcSubscription> { - writeln!( - self.log.lock().unwrap(), - "{sub}({}) (unsub: {unsub})", - params.as_ref().map(|p| p.get()).unwrap_or("[]") - ) - .unwrap(); - - // We've logged the request; just return garbage. Because a boxed future is returned, - // and that will return a boxed Stream impl, you have a bunch of flexibility to build - // and return whatever type of Stream you see fit. - let res = RawValue::from_string("[]".to_string()).unwrap(); - let stream = futures::stream::once(async move { Ok(res) }); - let stream: Pin + Send>> = Box::pin(stream); - // This subscription does not provide an ID. - Box::pin(std::future::ready(Ok(RawRpcSubscription { - stream, - id: None, - }))) - } -} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Instantiate our replacement RPC client. - let log = Arc::default(); - let rpc_client = { - let inner = MyLoggingClient { - log: Arc::clone(&log), - }; - RpcClient::new(inner) - }; - - // Pass this into our OnlineClient to instantiate it. This will lead to some - // RPC calls being made to fetch chain details/metadata, which will immediately - // fail.. 
- let _ = OnlineClient::::from_rpc_client(rpc_client).await; - - // But, we can see that the calls were made via our custom RPC client: - println!("Log of calls made:\n\n{}", log.lock().unwrap().as_str()); - Ok(()) -} diff --git a/subxt/examples/setup_client_offline.rs b/subxt/examples/setup_client_offline.rs deleted file mode 100644 index ba483f7164..0000000000 --- a/subxt/examples/setup_client_offline.rs +++ /dev/null @@ -1,35 +0,0 @@ -#![allow(missing_docs)] -use subxt::ext::codec::Decode; -use subxt::metadata::Metadata; -use subxt::utils::H256; -use subxt::{OfflineClient, config::PolkadotConfig}; - -#[tokio::main] -async fn main() -> Result<(), Box> { - // We need to obtain the following details for an OfflineClient to be instantiated: - - // 1. Genesis hash (RPC call: chain_getBlockHash(0)): - let genesis_hash = { - let h = "91b171bb158e2d3848fa23a9f1c25182fb8e20313b2c1eb49219da7a70ce90c3"; - let bytes = hex::decode(h).unwrap(); - H256::from_slice(&bytes) - }; - - // 2. A runtime version (system_version constant on a Substrate node has these): - let runtime_version = subxt::client::RuntimeVersion { - spec_version: 9370, - transaction_version: 20, - }; - - // 3. 
Metadata (I'll load it from the downloaded metadata, but you can use - // `subxt metadata > file.scale` to download it): - let metadata = { - let bytes = std::fs::read("./artifacts/polkadot_metadata_small.scale").unwrap(); - Metadata::decode(&mut &*bytes).unwrap() - }; - - // Create an offline client using the details obtained above: - let _api = OfflineClient::::new(genesis_hash, runtime_version, metadata); - - Ok(()) -} diff --git a/subxt/examples/setup_config_assethub.rs b/subxt/examples/setup_config_assethub.rs deleted file mode 100644 index b39f39a2dd..0000000000 --- a/subxt/examples/setup_config_assethub.rs +++ /dev/null @@ -1,54 +0,0 @@ -#![allow(missing_docs)] -use subxt::config::{ - Config, DefaultExtrinsicParams, DefaultExtrinsicParamsBuilder, PolkadotConfig, SubstrateConfig, -}; -use subxt_signer::sr25519::dev; - -#[subxt::subxt( - runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale", - derive_for_type( - path = "staging_xcm::v3::multilocation::MultiLocation", - derive = "Clone, codec::Encode", - recursive - ) -)] -pub mod runtime {} -use runtime::runtime_types::staging_xcm::v3::multilocation::MultiLocation; -use runtime::runtime_types::xcm::v3::junctions::Junctions; - -// We don't need to construct this at runtime, so an empty enum is appropriate. 
-pub enum AssetHubConfig {} - -impl Config for AssetHubConfig { - type AccountId = ::AccountId; - type Address = ::Address; - type Signature = ::Signature; - type Hasher = ::Hasher; - type Header = ::Header; - type ExtrinsicParams = DefaultExtrinsicParams; - // Here we use the MultiLocation from the metadata as a part of the config: - // The `ChargeAssetTxPayment` signed extension that is part of the ExtrinsicParams above, now uses the type: - type AssetId = MultiLocation; -} - -#[tokio::main] -async fn main() { - // With the config defined, we can create an extrinsic with subxt: - let client = subxt::OnlineClient::::new().await.unwrap(); - let tx_payload = runtime::tx().system().remark(b"Hello".to_vec()); - - // Build extrinsic params using an asset at this location as a tip: - let location: MultiLocation = MultiLocation { - parents: 3, - interior: Junctions::Here, - }; - let tx_config = DefaultExtrinsicParamsBuilder::::new() - .tip_of(1234, location) - .build(); - - // And provide the extrinsic params including the tip when submitting a transaction: - let _ = client - .tx() - .sign_and_submit_then_watch(&tx_payload, &dev::alice(), tx_config) - .await; -} diff --git a/subxt/examples/setup_config_custom.rs b/subxt/examples/setup_config_custom.rs deleted file mode 100644 index a4732f3f89..0000000000 --- a/subxt/examples/setup_config_custom.rs +++ /dev/null @@ -1,97 +0,0 @@ -#![allow(missing_docs)] -use codec::Encode; -use subxt::client::ClientState; -use subxt::config::{ - Config, ExtrinsicParams, ExtrinsicParamsEncoder, ExtrinsicParamsError, HashFor, - transaction_extensions::Params, -}; -use subxt_signer::sr25519::dev; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale")] -pub mod runtime {} - -// We don't need to construct this at runtime, -// so an empty enum is appropriate: -pub enum CustomConfig {} - -impl Config for CustomConfig { - type AccountId = subxt::utils::AccountId32; - type Address = subxt::utils::MultiAddress; - 
type Signature = subxt::utils::MultiSignature; - type Hasher = subxt::config::substrate::BlakeTwo256; - type Header = subxt::config::substrate::SubstrateHeader; - type ExtrinsicParams = CustomExtrinsicParams; - type AssetId = u32; -} - -// This represents some arbitrary (and nonsensical) custom parameters that -// will be attached to transaction extra and additional payloads: -pub struct CustomExtrinsicParams { - genesis_hash: HashFor, - tip: u128, - foo: bool, -} - -// We can provide a "pretty" interface to allow users to provide these: -#[derive(Default)] -pub struct CustomExtrinsicParamsBuilder { - tip: u128, - foo: bool, -} - -impl CustomExtrinsicParamsBuilder { - pub fn new() -> Self { - Default::default() - } - pub fn tip(mut self, value: u128) -> Self { - self.tip = value; - self - } - pub fn enable_foo(mut self) -> Self { - self.foo = true; - self - } -} - -impl Params for CustomExtrinsicParamsBuilder {} - -// Describe how to fetch and then encode the params: -impl ExtrinsicParams for CustomExtrinsicParams { - type Params = CustomExtrinsicParamsBuilder; - - // Gather together all of the params we will need to encode: - fn new(client: &ClientState, params: Self::Params) -> Result { - Ok(Self { - genesis_hash: client.genesis_hash, - tip: params.tip, - foo: params.foo, - }) - } -} - -// Encode the relevant params when asked: -impl ExtrinsicParamsEncoder for CustomExtrinsicParams { - fn encode_value_to(&self, v: &mut Vec) { - (self.tip, self.foo).encode_to(v); - } - fn encode_implicit_to(&self, v: &mut Vec) { - self.genesis_hash.encode_to(v) - } -} - -#[tokio::main] -async fn main() { - // With the config defined, it can be handed to Subxt as follows: - let client = subxt::OnlineClient::::new().await.unwrap(); - - let tx_payload = runtime::tx().system().remark(b"Hello".to_vec()); - - // Build your custom "Params": - let tx_config = CustomExtrinsicParamsBuilder::new().tip(1234).enable_foo(); - - // And provide them when submitting a transaction: - let _ = client 
- .tx() - .sign_and_submit_then_watch(&tx_payload, &dev::alice(), tx_config) - .await; -} diff --git a/subxt/examples/setup_config_transaction_extension.rs b/subxt/examples/setup_config_transaction_extension.rs deleted file mode 100644 index f0fcc58894..0000000000 --- a/subxt/examples/setup_config_transaction_extension.rs +++ /dev/null @@ -1,106 +0,0 @@ -#![allow(missing_docs)] -use codec::Encode; -use scale_encode::EncodeAsType; -use scale_info::PortableRegistry; -use subxt::client::ClientState; -use subxt::config::transaction_extensions; -use subxt::config::{ - Config, DefaultExtrinsicParamsBuilder, ExtrinsicParams, ExtrinsicParamsEncoder, - ExtrinsicParamsError, -}; -use subxt_signer::sr25519::dev; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod runtime {} - -// We don't need to construct this at runtime, -// so an empty enum is appropriate: -#[derive(EncodeAsType)] -pub enum CustomConfig {} - -impl Config for CustomConfig { - type AccountId = subxt::utils::AccountId32; - type Address = subxt::utils::MultiAddress; - type Signature = subxt::utils::MultiSignature; - type Hasher = subxt::config::substrate::BlakeTwo256; - type Header = subxt::config::substrate::SubstrateHeader; - type ExtrinsicParams = transaction_extensions::AnyOf< - Self, - ( - // Load in the existing signed extensions we're interested in - // (if the extension isn't actually needed it'll just be ignored): - transaction_extensions::VerifySignature, - transaction_extensions::CheckSpecVersion, - transaction_extensions::CheckTxVersion, - transaction_extensions::CheckNonce, - transaction_extensions::CheckGenesis, - transaction_extensions::CheckMortality, - transaction_extensions::ChargeAssetTxPayment, - transaction_extensions::ChargeTransactionPayment, - transaction_extensions::CheckMetadataHash, - // And add a new one of our own: - CustomTransactionExtension, - ), - >; - type AssetId = u32; -} - -// Our custom signed extension doesn't do much: -pub 
struct CustomTransactionExtension; - -// Give the extension a name; this allows `AnyOf` to look it -// up in the chain metadata in order to know when and if to use it. -impl transaction_extensions::TransactionExtension for CustomTransactionExtension { - type Decoded = (); - fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { - identifier == "CustomTransactionExtension" - } -} - -// Gather together any params we need for our signed extension, here none. -impl ExtrinsicParams for CustomTransactionExtension { - type Params = (); - - fn new(_client: &ClientState, _params: Self::Params) -> Result { - Ok(CustomTransactionExtension) - } -} - -// Encode whatever the extension needs to provide when asked: -impl ExtrinsicParamsEncoder for CustomTransactionExtension { - fn encode_value_to(&self, v: &mut Vec) { - "Hello".encode_to(v); - } - fn encode_implicit_to(&self, v: &mut Vec) { - true.encode_to(v) - } -} - -// When composing a tuple of signed extensions, the user parameters we need must -// be able to convert `Into` a tuple of corresponding `Params`. Here, we just -// "hijack" the default param builder, but add the `Params` (`()`) for our -// new signed extension at the end, to make the types line up. IN reality you may wish -// to construct an entirely new interface to provide the relevant `Params`. 
-pub fn custom( - params: DefaultExtrinsicParamsBuilder, -) -> <::ExtrinsicParams as ExtrinsicParams>::Params { - let (a, b, c, d, e, f, g, h, i) = params.build(); - (a, b, c, d, e, f, g, h, i, ()) -} - -#[tokio::main] -async fn main() { - // With the config defined, it can be handed to Subxt as follows: - let client = subxt::OnlineClient::::new().await.unwrap(); - - let tx_payload = runtime::tx().system().remark(b"Hello".to_vec()); - - // Configure the tx params: - let tx_config = DefaultExtrinsicParamsBuilder::new().tip(1234); - - // And provide them when submitting a transaction: - let _ = client - .tx() - .sign_and_submit_then_watch(&tx_payload, &dev::alice(), custom(tx_config)) - .await; -} diff --git a/subxt/examples/setup_reconnecting_rpc_client.rs b/subxt/examples/setup_reconnecting_rpc_client.rs deleted file mode 100644 index a3763947c7..0000000000 --- a/subxt/examples/setup_reconnecting_rpc_client.rs +++ /dev/null @@ -1,77 +0,0 @@ -//! Example to utilize the `reconnecting rpc client` in subxt -//! which hidden behind behind `--feature reconnecting-rpc-client` -//! -//! To utilize full logs from the RPC client use: -//! `RUST_LOG="jsonrpsee=trace,subxt-reconnecting-rpc-client=trace"` - -#![allow(missing_docs)] - -use std::time::Duration; - -use futures::StreamExt; -use subxt::backend::rpc::reconnecting_rpc_client::{ExponentialBackoff, RpcClient}; -use subxt::{OnlineClient, PolkadotConfig}; - -// Generate an interface that we can use from the node's metadata. -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - tracing_subscriber::fmt::init(); - - // Create a new client with a reconnecting RPC client. - let rpc = RpcClient::builder() - // Reconnect with exponential backoff - // - // This API is "iterator-like" and we use `take` to limit the number of retries. 
- .retry_policy( - ExponentialBackoff::from_millis(100) - .max_delay(Duration::from_secs(10)) - .take(3), - ) - // There are other configurations as well that can be found at [`reconnecting_rpc_client::ClientBuilder`]. - .build("ws://localhost:9944".to_string()) - .await?; - - // If you want to use the chainhead backend with the reconnecting RPC client, you can do so like this: - // - // ``` - // use subxt::backend::chain_head:ChainHeadBackend; - // use subxt::OnlineClient; - // - // let backend = ChainHeadBackend::builder().build_with_background_task(RpcClient::new(rpc.clone())); - // let api: OnlineClient = OnlineClient::from_backend(Arc::new(backend)).await?; - // ``` - - let api: OnlineClient = OnlineClient::from_rpc_client(rpc.clone()).await?; - - // Run for at most 100 blocks and print a bunch of information about it. - // - // The subscription is automatically re-started when the RPC client has reconnected. - // You can test that by stopping the polkadot node and restarting it. - let mut blocks_sub = api.blocks().subscribe_finalized().await?.take(100); - - while let Some(block) = blocks_sub.next().await { - let block = match block { - Ok(b) => b, - Err(e) => { - // This can only happen on the legacy backend and the unstable backend - // will handle this internally. - if e.is_disconnected_will_reconnect() { - println!("The RPC connection was lost and we may have missed a few blocks"); - continue; - } - - return Err(e.into()); - } - }; - - let block_number = block.number(); - let block_hash = block.hash(); - - println!("Block #{block_number} ({block_hash})"); - } - - Ok(()) -} diff --git a/subxt/examples/setup_rpc_chainhead_backend.rs b/subxt/examples/setup_rpc_chainhead_backend.rs deleted file mode 100644 index 37da5fce19..0000000000 --- a/subxt/examples/setup_rpc_chainhead_backend.rs +++ /dev/null @@ -1,35 +0,0 @@ -//! Example to utilize the ChainHeadBackend rpc backend to subscribe to finalized blocks. 
- -#![allow(missing_docs)] - -use futures::StreamExt; -use subxt::backend::chain_head::{ChainHeadBackend, ChainHeadBackendBuilder}; -use subxt::backend::rpc::RpcClient; -use subxt::{OnlineClient, PolkadotConfig}; - -// Generate an interface that we can use from the node's metadata. -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - tracing_subscriber::fmt::init(); - - let rpc = RpcClient::from_url("ws://localhost:9944".to_string()).await?; - let backend: ChainHeadBackend = - ChainHeadBackendBuilder::default().build_with_background_driver(rpc.clone()); - let api = OnlineClient::from_backend(std::sync::Arc::new(backend)).await?; - - let mut blocks_sub = api.blocks().subscribe_finalized().await?.take(100); - - while let Some(block) = blocks_sub.next().await { - let block = block?; - - let block_number = block.number(); - let block_hash = block.hash(); - - println!("Block #{block_number} ({block_hash})"); - } - - Ok(()) -} diff --git a/subxt/examples/storage_fetch.rs b/subxt/examples/storage_fetch.rs deleted file mode 100644 index 1fe491898c..0000000000 --- a/subxt/examples/storage_fetch.rs +++ /dev/null @@ -1,32 +0,0 @@ -#![allow(missing_docs)] -use subxt::{OnlineClient, PolkadotConfig}; -use subxt_signer::sr25519::dev; - -// Generate an interface that we can use from the node's metadata. -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a new API client, configured to talk to Polkadot nodes. - let api = OnlineClient::::new().await?; - let account = dev::alice().public_key().into(); - - // Build a storage query to access account information. - let storage_query = polkadot::storage().system().account(); - - // Use that query to access a storage entry, fetch a result and decode the value. 
- // The static address knows that fetching requires a tuple of one value, an - // AccountId32. - let client_at = api.storage().at_latest().await?; - let account_info = client_at - .entry(storage_query)? - .fetch((account,)) - .await? - .decode()?; - - // The static address that we got from the subxt macro knows the expected input - // and return types, so it is decoded into a static type for us. - println!("Alice: {account_info:?}"); - Ok(()) -} diff --git a/subxt/examples/storage_fetch_dynamic.rs b/subxt/examples/storage_fetch_dynamic.rs deleted file mode 100644 index 61a81fef98..0000000000 --- a/subxt/examples/storage_fetch_dynamic.rs +++ /dev/null @@ -1,34 +0,0 @@ -#![allow(missing_docs)] -use subxt::dynamic::{At, Value}; -use subxt::utils::AccountId32; -use subxt::{OnlineClient, PolkadotConfig}; -use subxt_signer::sr25519::dev; - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a new API client, configured to talk to Polkadot nodes. - let api = OnlineClient::::new().await?; - - // Build a dynamic storage query to access account information. - // here, we assume that there is one value to provide at this entry - // to access a value; an AccountId32. In this example we don't know the - // return type and so we set it to `Value`, which anything can decode into. - let account: AccountId32 = dev::alice().public_key().into(); - let storage_query = subxt::dynamic::storage::<(AccountId32,), Value>("System", "Account"); - - // Use that query to access a storage entry, fetch a result and decode the value. - let client_at = api.storage().at_latest().await?; - let account_info = client_at - .entry(storage_query)? - .fetch((account,)) - .await? - .decode()?; - - // With out `Value` type we can dig in to find what we want using the `At` - // trait and `.at()` method that this provides on the Value. 
- println!( - "Alice has free balance: {}", - account_info.at("data").at("free").unwrap() - ); - Ok(()) -} diff --git a/subxt/examples/storage_iterating.rs b/subxt/examples/storage_iterating.rs deleted file mode 100644 index 3ff74029bd..0000000000 --- a/subxt/examples/storage_iterating.rs +++ /dev/null @@ -1,42 +0,0 @@ -#![allow(missing_docs)] -use subxt::ext::futures::StreamExt; -use subxt::{OnlineClient, PolkadotConfig}; - -// Generate an interface that we can use from the node's metadata. -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a new API client, configured to talk to Polkadot nodes. - let api = OnlineClient::::new().await?; - - // Build a storage query to access account information. Same as if we were - // fetching a single value from this entry. - let storage_query = polkadot::storage().system().account(); - - // Use that query to access a storage entry, iterate over it and decode values. - let client_at = api.storage().at_latest().await?; - - // We provide an empty tuple when iterating. If the storage entry had been an N map with - // multiple keys, then we could provide any prefix of those keys to iterate over. This is - // statically type checked, so only a valid number/type of keys in the tuple is accepted. - let mut values = client_at.entry(storage_query)?.iter(()).await?; - - while let Some(kv) = values.next().await { - let kv = kv?; - - // The key decodes into the type that the static address knows about, in this case a - // tuple of one entry, because the only part of the key that we can decode is the - // AccountId32 for each user. - let (account_id32,) = kv.key()?.decode()?; - - // The value decodes into a statically generated type which holds account information. 
- let value = kv.value().decode()?; - - let value_data = value.data; - println!("{account_id32}:\n {value_data:?}"); - } - - Ok(()) -} diff --git a/subxt/examples/storage_iterating_dynamic.rs b/subxt/examples/storage_iterating_dynamic.rs deleted file mode 100644 index 443c977eef..0000000000 --- a/subxt/examples/storage_iterating_dynamic.rs +++ /dev/null @@ -1,42 +0,0 @@ -#![allow(missing_docs)] -use subxt::ext::futures::StreamExt; -use subxt::utils::AccountId32; -use subxt::{ - OnlineClient, PolkadotConfig, - dynamic::{At, Value}, -}; - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a new API client, configured to talk to Polkadot nodes. - let api = OnlineClient::::new().await?; - - // Build a dynamic storage query to access account information. - // here, we assume that there is one value to provide at this entry - // to access a value; an AccountId32. In this example we don't know the - // return type and so we set it to `Value`, which anything can decode into. - let storage_query = subxt::dynamic::storage::<(AccountId32,), Value>("System", "Account"); - - // Use that query to access a storage entry, iterate over it and decode values. - let client_at = api.storage().at_latest().await?; - let mut values = client_at.entry(storage_query)?.iter(()).await?; - - while let Some(kv) = values.next().await { - let kv = kv?; - - // The key decodes into the first type we provided in the address. Since there's just - // one key, it is a tuple of one entry, an AccountId32. If we didn't know how many - // keys or their type, we could set the key to `Vec` instead. - let (account_id32,) = kv.key()?.decode()?; - - // The value decodes into the second type we provided in the address. In this example, - // we just decode it into our `Value` type and then look at the "data" field in this - // (which implicitly assumes we get a struct shaped thing back with such a field). 
- let value = kv.value().decode()?; - - let value_data = value.at("data").unwrap(); - println!("{account_id32}:\n {value_data}"); - } - - Ok(()) -} diff --git a/subxt/examples/substrate_compat_signer.rs b/subxt/examples/substrate_compat_signer.rs deleted file mode 100644 index 968adffe76..0000000000 --- a/subxt/examples/substrate_compat_signer.rs +++ /dev/null @@ -1,117 +0,0 @@ -//! This example demonstrates how to use to add a custom signer implementation to `subxt` -//! by using the signer implementation from polkadot-sdk. -//! -//! Similar functionality was provided by the `substrate-compat` feature in the original `subxt` crate. -//! which is now removed. - -#![allow(missing_docs, unused)] - -use sp_core::{Pair as _, sr25519}; -use subxt::config::substrate::MultiAddress; -use subxt::{Config, OnlineClient, PolkadotConfig}; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -/// A concrete PairSigner implementation which relies on `sr25519::Pair` for signing -/// and that PolkadotConfig is the runtime configuration. -mod pair_signer { - use super::*; - use sp_runtime::{ - MultiSignature as SpMultiSignature, - traits::{IdentifyAccount, Verify}, - }; - use subxt::{ - config::substrate::{AccountId32, MultiSignature}, - tx::Signer, - }; - - /// A [`Signer`] implementation for [`sp_core::sr25519::Pair`]. - #[derive(Clone)] - pub struct PairSigner { - account_id: ::AccountId, - signer: sr25519::Pair, - } - - impl PairSigner { - /// Creates a new [`Signer`] from an [`sp_core::sr25519::Pair`]. - pub fn new(signer: sr25519::Pair) -> Self { - let account_id = - ::Signer::from(signer.public()).into_account(); - Self { - // Convert `sp_core::AccountId32` to `subxt::config::substrate::AccountId32`. - // - // This is necessary because we use `subxt::config::substrate::AccountId32` and no - // From/Into impls are provided between `sp_core::AccountId32` because `polkadot-sdk` isn't a direct - // dependency in subxt. 
- // - // This can also be done by provided a wrapper type around `subxt::config::substrate::AccountId32` to implement - // such conversions but that also most likely requires a custom `Config` with a separate `AccountId` type to work - // properly without additional hacks. - account_id: AccountId32(account_id.into()), - signer, - } - } - - /// Returns the [`sp_core::sr25519::Pair`] implementation used to construct this. - pub fn signer(&self) -> &sr25519::Pair { - &self.signer - } - - /// Return the account ID. - pub fn account_id(&self) -> &AccountId32 { - &self.account_id - } - } - - impl Signer for PairSigner { - fn account_id(&self) -> ::AccountId { - self.account_id.clone() - } - - fn sign(&self, signer_payload: &[u8]) -> ::Signature { - let signature = self.signer.sign(signer_payload); - MultiSignature::Sr25519(signature.0) - } - } -} - -#[tokio::main] -async fn main() -> Result<(), Box> { - tracing_subscriber::fmt::init(); - - // Create a new API client, configured to talk to Polkadot nodes. - let api = OnlineClient::::new().await?; - - let signer = { - let acc = sr25519::Pair::from_string("//Alice", None)?; - pair_signer::PairSigner::new(acc) - }; - - let dest = { - let acc = sr25519::Pair::from_string("//Bob", None)?; - MultiAddress::Address32(acc.public().0) - }; - - // Build a balance transfer extrinsic. - let balance_transfer_tx = polkadot::tx() - .balances() - .transfer_allow_death(dest, 100_000); - - // Submit the balance transfer extrinsic from Alice, and wait for it to be successful - // and in a finalized block. We get back the extrinsic events if all is well. - let events = api - .tx() - .sign_and_submit_then_watch_default(&balance_transfer_tx, &signer) - .await? - .wait_for_finalized_success() - .await?; - - // Find a Transfer event and print it. 
- let transfer_event = events.find_first::()?; - if let Some(event) = transfer_event { - println!("Balance transfer success: {event:?}"); - } - - Ok(()) -} diff --git a/new/examples/tx_basic.rs b/subxt/examples/transactions_basic.rs similarity index 58% rename from new/examples/tx_basic.rs rename to subxt/examples/transactions_basic.rs index 0c2dc243eb..2867247886 100644 --- a/new/examples/tx_basic.rs +++ b/subxt/examples/transactions_basic.rs @@ -1,5 +1,5 @@ #![allow(missing_docs)] -use subxt::{OnlineClient, PolkadotConfig}; +use subxt::{Error, OnlineClient, PolkadotConfig}; use subxt_signer::sr25519::dev; // Generate an interface that we can use from the node's metadata. @@ -7,27 +7,33 @@ use subxt_signer::sr25519::dev; pub mod polkadot {} #[tokio::main] -async fn main() -> Result<(), Box> { +async fn main() -> Result<(), Error> { // Create a new API client, configured to talk to Polkadot nodes. - let api = OnlineClient::::new().await?; + let config = PolkadotConfig::new(); + let api = OnlineClient::new(config).await?; + + // Almost all actions are performed at an explicit block. Here we use + // the current block at the time of running this. + let at_block = api.at_current_block().await?; // Build a balance transfer extrinsic. let dest = dev::bob().public_key().into(); - let balance_transfer_tx = polkadot::tx().balances().transfer_allow_death(dest, 10_000); + let balance_transfer_tx = polkadot::transactions() + .balances() + .transfer_allow_death(dest, 10_000); // Submit the balance transfer extrinsic from Alice, and wait for it to be successful // and in a finalized block. We get back the extrinsic events if all is well. let from = dev::alice(); - let events = api - .tx() + let events = at_block + .transactions() .sign_and_submit_then_watch_default(&balance_transfer_tx, &from) .await? .wait_for_finalized_success() .await?; // Find a Transfer event and print it. 
- let transfer_event = events.find_first::()?; - if let Some(event) = transfer_event { + if let Some(event) = events.find_first::() { println!("Balance transfer success: {event:?}"); } diff --git a/subxt/examples/tx_basic.rs b/subxt/examples/tx_basic.rs deleted file mode 100644 index 0c2dc243eb..0000000000 --- a/subxt/examples/tx_basic.rs +++ /dev/null @@ -1,35 +0,0 @@ -#![allow(missing_docs)] -use subxt::{OnlineClient, PolkadotConfig}; -use subxt_signer::sr25519::dev; - -// Generate an interface that we can use from the node's metadata. -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a new API client, configured to talk to Polkadot nodes. - let api = OnlineClient::::new().await?; - - // Build a balance transfer extrinsic. - let dest = dev::bob().public_key().into(); - let balance_transfer_tx = polkadot::tx().balances().transfer_allow_death(dest, 10_000); - - // Submit the balance transfer extrinsic from Alice, and wait for it to be successful - // and in a finalized block. We get back the extrinsic events if all is well. - let from = dev::alice(); - let events = api - .tx() - .sign_and_submit_then_watch_default(&balance_transfer_tx, &from) - .await? - .wait_for_finalized_success() - .await?; - - // Find a Transfer event and print it. - let transfer_event = events.find_first::()?; - if let Some(event) = transfer_event { - println!("Balance transfer success: {event:?}"); - } - - Ok(()) -} diff --git a/subxt/examples/tx_basic_frontier.rs b/subxt/examples/tx_basic_frontier.rs deleted file mode 100644 index 23b577a055..0000000000 --- a/subxt/examples/tx_basic_frontier.rs +++ /dev/null @@ -1,56 +0,0 @@ -//! Example to use subxt to talk to substrate-based nodes with ethereum accounts -//! which is not the default for subxt which is why we need to provide a custom config. -//! -//! 
This example requires to run a local frontier/moonbeam node to work. - -#![allow(missing_docs)] - -use subxt::OnlineClient; -use subxt_core::utils::AccountId20; -use subxt_signer::eth::{Signature, dev}; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/frontier_metadata_small.scale")] -mod eth_runtime {} - -enum EthRuntimeConfig {} - -impl subxt::Config for EthRuntimeConfig { - type AccountId = AccountId20; - type Address = AccountId20; - type Signature = Signature; - type Hasher = subxt::config::substrate::BlakeTwo256; - type Header = - subxt::config::substrate::SubstrateHeader; - type ExtrinsicParams = subxt::config::SubstrateExtrinsicParams; - type AssetId = u32; -} - -#[tokio::main] -async fn main() -> Result<(), Box> { - let api = OnlineClient::::from_insecure_url("ws://127.0.0.1:9944").await?; - - let alith = dev::alith(); - let baltathar = dev::baltathar(); - let dest = baltathar.public_key().to_account_id(); - - println!("baltathar pub: {}", hex::encode(baltathar.public_key().0)); - println!("baltathar addr: {}", hex::encode(dest)); - - let balance_transfer_tx = eth_runtime::tx() - .balances() - .transfer_allow_death(dest, 10_001); - - let events = api - .tx() - .sign_and_submit_then_watch_default(&balance_transfer_tx, &alith) - .await? 
- .wait_for_finalized_success() - .await?; - - let transfer_event = events.find_first::()?; - if let Some(event) = transfer_event { - println!("Balance transfer success: {event:?}"); - } - - Ok(()) -} diff --git a/subxt/examples/tx_boxed.rs b/subxt/examples/tx_boxed.rs deleted file mode 100644 index 0dd4c4d2e0..0000000000 --- a/subxt/examples/tx_boxed.rs +++ /dev/null @@ -1,43 +0,0 @@ -#![allow(missing_docs)] -use subxt::{OnlineClient, PolkadotConfig}; -use subxt_signer::sr25519::dev; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - let api = OnlineClient::::new().await?; - - // Prepare some extrinsics. These are boxed so that they can live alongside each other. - let txs = [dynamic_remark(), balance_transfer(), remark()]; - - for tx in txs { - let from = dev::alice(); - api.tx() - .sign_and_submit_then_watch_default(&tx, &from) - .await? - .wait_for_finalized_success() - .await?; - - println!("Submitted tx"); - } - - Ok(()) -} - -fn balance_transfer() -> Box { - let dest = dev::bob().public_key().into(); - Box::new(polkadot::tx().balances().transfer_allow_death(dest, 10_000)) -} - -fn remark() -> Box { - Box::new(polkadot::tx().system().remark(vec![1, 2, 3, 4, 5])) -} - -fn dynamic_remark() -> Box { - use subxt::dynamic::{Value, tx}; - let tx_payload = tx("System", "remark", vec![Value::from_bytes("Hello")]); - - Box::new(tx_payload) -} diff --git a/subxt/examples/tx_partial.rs b/subxt/examples/tx_partial.rs deleted file mode 100644 index 0684091de6..0000000000 --- a/subxt/examples/tx_partial.rs +++ /dev/null @@ -1,53 +0,0 @@ -#![allow(missing_docs)] -use subxt::{OnlineClient, PolkadotConfig}; -use subxt_signer::sr25519::dev; - -type BoxedError = Box; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), BoxedError> { - // Spawned tasks require 
things held across await points to impl Send, - // so we use one to demonstrate that this is possible with `PartialTransaction` - tokio::spawn(signing_example()).await??; - Ok(()) -} - -async fn signing_example() -> Result<(), BoxedError> { - let api = OnlineClient::::new().await?; - - // Build a balance transfer extrinsic. - let dest = dev::bob().public_key().into(); - let balance_transfer_tx = polkadot::tx().balances().transfer_allow_death(dest, 10_000); - - let alice = dev::alice(); - - // Create partial tx, ready to be signed. - let mut partial_tx = api - .tx() - .create_partial( - &balance_transfer_tx, - &alice.public_key().to_account_id(), - Default::default(), - ) - .await?; - - // Simulate taking some time to get a signature back, in part to - // show that the `PartialTransaction` can be held across await points. - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - let signature = alice.sign(&partial_tx.signer_payload()); - - // Sign the transaction. - let tx = partial_tx - .sign_with_account_and_signature(&alice.public_key().to_account_id(), &signature.into()); - - // Submit it. - tx.submit_and_watch() - .await? - .wait_for_finalized_success() - .await?; - - Ok(()) -} diff --git a/subxt/examples/tx_status_stream.rs b/subxt/examples/tx_status_stream.rs deleted file mode 100644 index cdd55c4e82..0000000000 --- a/subxt/examples/tx_status_stream.rs +++ /dev/null @@ -1,55 +0,0 @@ -#![allow(missing_docs)] -use subxt::{OnlineClient, PolkadotConfig, tx::TxStatus}; -use subxt_signer::sr25519::dev; - -// Generate an interface that we can use from the node's metadata. -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a new API client, configured to talk to Polkadot nodes. - let api = OnlineClient::::new().await?; - - // Build a balance transfer extrinsic. 
- let dest = dev::bob().public_key().into(); - let balance_transfer_tx = polkadot::tx().balances().transfer_allow_death(dest, 10_000); - - // Submit the balance transfer extrinsic from Alice, and then monitor the - // progress of it. - let from = dev::alice(); - let mut balance_transfer_progress = api - .tx() - .sign_and_submit_then_watch_default(&balance_transfer_tx, &from) - .await?; - - while let Some(status) = balance_transfer_progress.next().await { - match status? { - // It's finalized in a block! - TxStatus::InFinalizedBlock(in_block) => { - println!( - "Transaction {:?} is finalized in block {:?}", - in_block.extrinsic_hash(), - in_block.block_hash() - ); - - // grab the events and fail if no ExtrinsicSuccess event seen: - let events = in_block.wait_for_success().await?; - // We can look for events (this uses the static interface; we can also iterate - // over them and dynamically decode them): - let transfer_event = events.find_first::()?; - - if let Some(event) = transfer_event { - println!("Balance transfer success: {event:?}"); - } else { - println!("Failed to find Balances::Transfer Event"); - } - } - // Just log any other status we encounter: - other => { - println!("Status: {other:?}"); - } - } - } - Ok(()) -} diff --git a/subxt/examples/tx_with_params.rs b/subxt/examples/tx_with_params.rs deleted file mode 100644 index 00126a7f9f..0000000000 --- a/subxt/examples/tx_with_params.rs +++ /dev/null @@ -1,28 +0,0 @@ -#![allow(missing_docs)] -use subxt::config::polkadot::PolkadotExtrinsicParamsBuilder as Params; -use subxt::{OnlineClient, PolkadotConfig}; -use subxt_signer::sr25519::dev; - -#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -pub mod polkadot {} - -#[tokio::main] -async fn main() -> Result<(), Box> { - // Create a new API client, configured to talk to Polkadot nodes. - let api = OnlineClient::::new().await?; - - // Build a balance transfer extrinsic. 
- let dest = dev::bob().public_key().into(); - let tx = polkadot::tx().balances().transfer_allow_death(dest, 10_000); - - // Configure the transaction parameters; we give a small tip and set the - // transaction to live for 32 blocks from the `latest_block` above. - let tx_params = Params::new().tip(1_000).mortal(32).build(); - - // submit the transaction: - let from = dev::alice(); - let hash = api.tx().sign_and_submit(&tx, &from, tx_params).await?; - println!("Balance transfer extrinsic submitted with hash : {hash}"); - - Ok(()) -} diff --git a/new/src/backend.rs b/subxt/src/backend.rs similarity index 91% rename from new/src/backend.rs rename to subxt/src/backend.rs index 7a324c7bc4..f531259fe6 100644 --- a/new/src/backend.rs +++ b/subxt/src/backend.rs @@ -263,25 +263,6 @@ impl BlockRef { /// to the block that it's associated with. pub trait BlockRefT: Send + Sync + 'static {} -/// Runtime version information needed to submit transactions. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct RuntimeVersion { - /// Version of the runtime specification. A full-node will not attempt to use its native - /// runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`, - /// `spec_version` and `authoring_version` are the same between Wasm and native. - pub spec_version: u32, - /// All existing dispatches are fully compatible when this number doesn't change. If this - /// number changes, then `spec_version` must change, also. - /// - /// This number must change when an existing dispatchable (module ID, dispatch ID) is changed, - /// either through an alteration in its user-level semantics, a parameter - /// added/removed/changed, a dispatchable being removed, a module being removed, or a - /// dispatchable/module changing its index. - /// - /// It need *not* change when a new module is added or when a dispatchable is added. - pub transaction_version: u32, -} - /// A stream of some item. 
pub struct StreamOf(Pin + Send + 'static>>); diff --git a/new/src/backend/archive.rs b/subxt/src/backend/archive.rs similarity index 100% rename from new/src/backend/archive.rs rename to subxt/src/backend/archive.rs diff --git a/new/src/backend/archive/storage_stream.rs b/subxt/src/backend/archive/storage_stream.rs similarity index 100% rename from new/src/backend/archive/storage_stream.rs rename to subxt/src/backend/archive/storage_stream.rs diff --git a/new/src/backend/chain_head.rs b/subxt/src/backend/chain_head.rs similarity index 100% rename from new/src/backend/chain_head.rs rename to subxt/src/backend/chain_head.rs diff --git a/subxt/src/backend/chain_head/follow_stream.rs b/subxt/src/backend/chain_head/follow_stream.rs index f06b5d0677..b763a6270b 100644 --- a/subxt/src/backend/chain_head/follow_stream.rs +++ b/subxt/src/backend/chain_head/follow_stream.rs @@ -2,7 +2,7 @@ // This file is dual-licensed as Apache-2.0 or GPL-3.0. // see LICENSE for license details. -use crate::config::{Config, HashFor}; +use crate::config::{Config, HashFor, RpcConfigFor}; use crate::error::BackendError; use futures::{FutureExt, Stream, StreamExt, TryStreamExt}; use std::future::Future; @@ -103,7 +103,9 @@ impl FollowStream { } /// Create a new [`FollowStream`] given the RPC methods. 
- pub fn from_methods(methods: ChainHeadRpcMethods) -> FollowStream> { + pub fn from_methods( + methods: ChainHeadRpcMethods>, + ) -> FollowStream> { FollowStream { stream_getter: Box::new(move || { let methods = methods.clone(); @@ -112,9 +114,8 @@ impl FollowStream { let stream = methods.chainhead_v1_follow(true).await?; // Extract the subscription ID: let Some(sub_id) = stream.subscription_id().map(ToOwned::to_owned) else { - return Err(BackendError::Other( - "Subscription ID expected for chainHead_follow response, but not given" - .to_owned(), + return Err(BackendError::other( + "Subscription ID expected for chainHead_follow response, but not given", )); }; // Map stream errors into the higher level subxt one: @@ -311,7 +312,7 @@ pub mod test { Ok(FollowEvent::Stop), Ok(ev_new_block(1, 2)), // Nothing should be emitted after an error: - Err(BackendError::Other("ended".to_owned())), + Err(BackendError::other("ended")), Ok(ev_new_block(2, 3)), ] }); diff --git a/subxt/src/backend/chain_head/follow_stream_driver.rs b/subxt/src/backend/chain_head/follow_stream_driver.rs index f1ff507729..0324f5ea35 100644 --- a/subxt/src/backend/chain_head/follow_stream_driver.rs +++ b/subxt/src/backend/chain_head/follow_stream_driver.rs @@ -537,7 +537,7 @@ mod test { Ok(ev_new_block(0, 1)), Ok(ev_best_block(1)), Ok(ev_finalized([1], [])), - Err(BackendError::Other("ended".to_owned())), + Err(BackendError::other("ended")), ] }, 10, @@ -580,7 +580,7 @@ mod test { Ok(ev_finalized([1], [])), Ok(ev_new_block(1, 2)), Ok(ev_new_block(2, 3)), - Err(BackendError::Other("ended".to_owned())), + Err(BackendError::other("ended")), ] }, 10, @@ -630,7 +630,7 @@ mod test { Ok(ev_new_block(1, 2)), Ok(ev_new_block(2, 3)), Ok(ev_finalized([1], [])), - Err(BackendError::Other("ended".to_owned())), + Err(BackendError::other("ended")), ] }, 10, @@ -668,7 +668,7 @@ mod test { Ok(FollowEvent::Stop), Ok(ev_initialized(1)), Ok(ev_finalized([2], [])), - Err(BackendError::Other("ended".to_owned())), + 
Err(BackendError::other("ended")), ] }, 10, @@ -714,7 +714,7 @@ mod test { // Emulate that we missed some blocks. Ok(ev_initialized(13)), Ok(ev_finalized([14], [])), - Err(BackendError::Other("ended".to_owned())), + Err(BackendError::other("ended")), ] }, 10, diff --git a/subxt/src/backend/chain_head/follow_stream_unpin.rs b/subxt/src/backend/chain_head/follow_stream_unpin.rs index db0995f574..b8e9c144f8 100644 --- a/subxt/src/backend/chain_head/follow_stream_unpin.rs +++ b/subxt/src/backend/chain_head/follow_stream_unpin.rs @@ -4,7 +4,7 @@ use super::ChainHeadRpcMethods; use super::follow_stream::FollowStream; -use crate::config::{Config, Hash, HashFor}; +use crate::config::{Config, Hash, HashFor, RpcConfigFor}; use crate::error::BackendError; use futures::stream::{FuturesUnordered, Stream, StreamExt}; use subxt_rpcs::methods::chain_head::{ @@ -275,7 +275,7 @@ impl FollowStreamUnpin { /// Create a new [`FollowStreamUnpin`] given the RPC methods. pub fn from_methods( follow_stream: FollowStream>, - methods: ChainHeadRpcMethods, + methods: ChainHeadRpcMethods>, max_block_life: usize, ) -> FollowStreamUnpin> { let unpin_method = Box::new(move |hash: HashFor, sub_id: Arc| { @@ -567,7 +567,7 @@ mod test { Ok(ev_new_block(0, 1)), Ok(ev_new_block(1, 2)), Ok(ev_new_block(2, 3)), - Err(BackendError::Other("ended".to_owned())), + Err(BackendError::other("ended")), ] }, 10, @@ -593,7 +593,7 @@ mod test { [ Ok(ev_initialized(0)), Ok(ev_finalized([1], [])), - Err(BackendError::Other("ended".to_owned())), + Err(BackendError::other("ended")), ] }, 3, @@ -624,7 +624,7 @@ mod test { Ok(ev_finalized([3], [])), Ok(ev_finalized([4], [])), Ok(ev_finalized([5], [])), - Err(BackendError::Other("ended".to_owned())), + Err(BackendError::other("ended")), ] }, 3, @@ -663,7 +663,7 @@ mod test { Ok(ev_new_block(1, 2)), Ok(ev_finalized([1], [])), Ok(ev_finalized([2], [])), - Err(BackendError::Other("ended".to_owned())), + Err(BackendError::other("ended")), ] }, 10, @@ -711,7 +711,7 @@ mod test 
{ Ok(ev_finalized([1], [])), Ok(ev_finalized([2], [3])), Ok(ev_finalized([4], [])), - Err(BackendError::Other("ended".to_owned())), + Err(BackendError::other("ended")), ] }, 10, @@ -771,7 +771,7 @@ mod test { Ok(ev_best_block(1)), Ok(ev_finalized([1], [])), Ok(ev_finalized([2], [])), - Err(BackendError::Other("ended".to_owned())), + Err(BackendError::other("ended")), ] }, 10, diff --git a/subxt/src/backend/chain_head/mod.rs b/subxt/src/backend/chain_head/mod.rs deleted file mode 100644 index 18521ce08f..0000000000 --- a/subxt/src/backend/chain_head/mod.rs +++ /dev/null @@ -1,878 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! This module will expose a backend implementation based on the new APIs -//! described at . See -//! [`rpc_methods`] for the raw API calls. -//! -//! # Warning -//! -//! Everything in this module is **unstable**, meaning that it could change without -//! warning at any time. - -mod follow_stream; -mod follow_stream_driver; -mod follow_stream_unpin; -mod storage_items; - -use self::follow_stream_driver::FollowStreamFinalizedHeads; -use crate::backend::{ - Backend, BlockRef, BlockRefT, RuntimeVersion, StorageResponse, StreamOf, StreamOfResults, - TransactionStatus, utils::retry, -}; -use crate::config::{Config, Hash, HashFor}; -use crate::error::{BackendError, RpcError}; -use async_trait::async_trait; -use follow_stream_driver::{FollowStreamDriver, FollowStreamDriverHandle}; -use futures::future::Either; -use futures::{Stream, StreamExt}; -use std::collections::HashMap; -use std::task::Poll; -use storage_items::StorageItems; -use subxt_rpcs::RpcClient; -use subxt_rpcs::methods::chain_head::{ - FollowEvent, MethodResponse, RuntimeEvent, StorageQuery, StorageQueryType, StorageResultType, -}; - -/// Re-export RPC types and methods from [`subxt_rpcs::methods::chain_head`]. 
-pub mod rpc_methods { - pub use subxt_rpcs::methods::legacy::*; -} - -// Expose the RPC methods. -pub use subxt_rpcs::methods::chain_head::ChainHeadRpcMethods; - -/// Configure and build an [`ChainHeadBackend`]. -pub struct ChainHeadBackendBuilder { - max_block_life: usize, - transaction_timeout_secs: usize, - submit_transactions_ignoring_follow_events: bool, - _marker: std::marker::PhantomData, -} - -impl Default for ChainHeadBackendBuilder { - fn default() -> Self { - Self::new() - } -} - -impl ChainHeadBackendBuilder { - /// Create a new [`ChainHeadBackendBuilder`]. - pub fn new() -> Self { - Self { - max_block_life: usize::MAX, - transaction_timeout_secs: 240, - submit_transactions_ignoring_follow_events: false, - _marker: std::marker::PhantomData, - } - } - - /// The age of a block is defined here as the difference between the current finalized block number - /// and the block number of a given block. Once the difference equals or exceeds the number given - /// here, the block is unpinned. - /// - /// By default, we will never automatically unpin blocks, but if the number of pinned blocks that we - /// keep hold of exceeds the number that the server can tolerate, then a `stop` event is generated and - /// we are forced to resubscribe, losing any pinned blocks. - pub fn max_block_life(mut self, max_block_life: usize) -> Self { - self.max_block_life = max_block_life; - self - } - - /// When a transaction is submitted, we wait for events indicating it's successfully made it into a finalized - /// block. If it takes too long for this to happen, we assume that something went wrong and that we should - /// give up waiting. - /// - /// Provide a value here to denote how long, in seconds, to wait before giving up. Defaults to 240 seconds. - /// - /// If [`Self::submit_transactions_ignoring_follow_events()`] is called, this timeout is ignored. 
- pub fn transaction_timeout(mut self, timeout_secs: usize) -> Self { - self.transaction_timeout_secs = timeout_secs; - self - } - - /// When a transaction is submitted, we normally synchronize the events that we get back with events from - /// our background `chainHead_follow` subscription, to ensure that any blocks hashes that we see can be - /// immediately queried (for example to get events or state at that block), and are kept around unless they - /// are no longer needed. - /// - /// The main downside of this synchronization is that there may be a delay in being handed back a - /// [`TransactionStatus::InFinalizedBlock`] event while we wait to see the same block hash emitted from - /// our background `chainHead_follow` subscription in order to ensure it's available for querying. - /// - /// Calling this method turns off this synchronization, speeding up the response and removing any reliance - /// on the `chainHead_follow` subscription continuing to run without stopping throughout submitting a transaction. - /// - /// # Warning - /// - /// This can lead to errors when calling APIs like `wait_for_finalized_success`, which will try to retrieve events - /// at the finalized block, because there will be a race and the finalized block may not be available for querying - /// yet. - pub fn submit_transactions_ignoring_follow_events(mut self) -> Self { - self.submit_transactions_ignoring_follow_events = true; - self - } - - /// A low-level API to build the backend and driver which requires polling the driver for the backend - /// to make progress. - /// - /// This is useful if you want to manage the driver yourself, for example if you want to run it in on - /// a specific runtime. - /// - /// If you just want to run the driver in the background until completion in on the default runtime, - /// use [`ChainHeadBackendBuilder::build_with_background_driver`] instead. 
- pub fn build( - self, - client: impl Into, - ) -> (ChainHeadBackend, ChainHeadBackendDriver) { - // Construct the underlying follow_stream layers: - let rpc_methods = ChainHeadRpcMethods::new(client.into()); - let follow_stream = - follow_stream::FollowStream::>::from_methods(rpc_methods.clone()); - let follow_stream_unpin = - follow_stream_unpin::FollowStreamUnpin::>::from_methods( - follow_stream, - rpc_methods.clone(), - self.max_block_life, - ); - let follow_stream_driver = FollowStreamDriver::new(follow_stream_unpin); - - // Wrap these into the backend and driver that we'll expose. - let backend = ChainHeadBackend { - methods: rpc_methods, - follow_handle: follow_stream_driver.handle(), - transaction_timeout_secs: self.transaction_timeout_secs, - submit_transactions_ignoring_follow_events: self - .submit_transactions_ignoring_follow_events, - }; - let driver = ChainHeadBackendDriver { - driver: follow_stream_driver, - }; - - (backend, driver) - } - - /// An API to build the backend and driver which will run in the background until completion - /// on the default runtime. - /// - /// - On non-wasm targets, this will spawn the driver on `tokio`. - /// - On wasm targets, this will spawn the driver on `wasm-bindgen-futures`. - #[cfg(feature = "runtime")] - #[cfg_attr(docsrs, doc(cfg(feature = "runtime")))] - pub fn build_with_background_driver(self, client: impl Into) -> ChainHeadBackend { - fn spawn(future: F) { - #[cfg(not(target_family = "wasm"))] - tokio::spawn(async move { - future.await; - }); - #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] - wasm_bindgen_futures::spawn_local(async move { - future.await; - }); - } - - let (backend, mut driver) = self.build(client); - spawn(async move { - // NOTE: we need to poll the driver until it's done i.e returns None - // to ensure that the backend is shutdown properly. 
- while let Some(res) = driver.next().await { - if let Err(err) = res { - tracing::debug!(target: "subxt", "chainHead backend error={err}"); - } - } - - tracing::debug!(target: "subxt", "chainHead backend was closed"); - }); - - backend - } -} - -/// Driver for the [`ChainHeadBackend`]. This must be polled in order for the -/// backend to make progress. -#[derive(Debug)] -pub struct ChainHeadBackendDriver { - driver: FollowStreamDriver>, -} - -impl Stream for ChainHeadBackendDriver { - type Item = > as Stream>::Item; - fn poll_next( - mut self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - self.driver.poll_next_unpin(cx) - } -} - -/// The chainHead backend. -#[derive(Debug, Clone)] -pub struct ChainHeadBackend { - // RPC methods we'll want to call: - methods: ChainHeadRpcMethods, - // A handle to the chainHead_follow subscription: - follow_handle: FollowStreamDriverHandle>, - // How long to wait until giving up on transactions: - transaction_timeout_secs: usize, - // Don't synchronise blocks with chainHead_follow when submitting txs: - submit_transactions_ignoring_follow_events: bool, -} - -impl ChainHeadBackend { - /// Configure and construct an [`ChainHeadBackend`] and the associated [`ChainHeadBackendDriver`]. 
- pub fn builder() -> ChainHeadBackendBuilder { - ChainHeadBackendBuilder::new() - } - - /// Stream block headers based on the provided filter fn - async fn stream_headers( - &self, - f: F, - ) -> Result>)>, BackendError> - where - F: Fn( - FollowEvent>>, - ) -> Vec>> - + Send - + Sync - + 'static, - { - let methods = self.methods.clone(); - - let headers = - FollowStreamFinalizedHeads::new(self.follow_handle.subscribe(), f).flat_map(move |r| { - let methods = methods.clone(); - - let (sub_id, block_refs) = match r { - Ok(ev) => ev, - Err(e) => return Either::Left(futures::stream::once(async { Err(e) })), - }; - - Either::Right( - futures::stream::iter(block_refs).filter_map(move |block_ref| { - let methods = methods.clone(); - let sub_id = sub_id.clone(); - - async move { - let res = methods - .chainhead_v1_header(&sub_id, block_ref.hash()) - .await - .transpose()?; - - let header = match res { - Ok(header) => header, - Err(e) => return Some(Err(e.into())), - }; - - Some(Ok((header, block_ref.into()))) - } - }), - ) - }); - - Ok(StreamOf(Box::pin(headers))) - } -} - -impl BlockRefT for follow_stream_unpin::BlockRef {} -impl From> for BlockRef { - fn from(b: follow_stream_unpin::BlockRef) -> Self { - BlockRef::new(b.hash(), b) - } -} - -impl super::sealed::Sealed for ChainHeadBackend {} - -#[async_trait] -impl Backend for ChainHeadBackend { - async fn storage_fetch_values( - &self, - keys: Vec>, - at: HashFor, - ) -> Result, BackendError> { - retry(|| async { - let queries = keys.iter().map(|key| StorageQuery { - key: &**key, - query_type: StorageQueryType::Value, - }); - - let storage_items = - StorageItems::from_methods(queries, at, &self.follow_handle, self.methods.clone()) - .await?; - - let stream = storage_items.filter_map(async |val| { - let val = match val { - Ok(val) => val, - Err(e) => return Some(Err(e)), - }; - - let StorageResultType::Value(result) = val.result else { - return None; - }; - Some(Ok(StorageResponse { - key: val.key.0, - value: result.0, 
- })) - }); - - Ok(StreamOf(Box::pin(stream))) - }) - .await - } - - async fn storage_fetch_descendant_keys( - &self, - key: Vec, - at: HashFor, - ) -> Result>, BackendError> { - retry(|| async { - // Ask for hashes, and then just ignore them and return the keys that come back. - let query = StorageQuery { - key: &*key, - query_type: StorageQueryType::DescendantsHashes, - }; - - let storage_items = StorageItems::from_methods( - std::iter::once(query), - at, - &self.follow_handle, - self.methods.clone(), - ) - .await?; - - let storage_result_stream = storage_items.map(|val| val.map(|v| v.key.0)); - Ok(StreamOf(Box::pin(storage_result_stream))) - }) - .await - } - - async fn storage_fetch_descendant_values( - &self, - key: Vec, - at: HashFor, - ) -> Result, BackendError> { - retry(|| async { - let query = StorageQuery { - key: &*key, - query_type: StorageQueryType::DescendantsValues, - }; - - let storage_items = StorageItems::from_methods( - std::iter::once(query), - at, - &self.follow_handle, - self.methods.clone(), - ) - .await?; - - let storage_result_stream = storage_items.filter_map(async |val| { - let val = match val { - Ok(val) => val, - Err(e) => return Some(Err(e)), - }; - - let StorageResultType::Value(result) = val.result else { - return None; - }; - Some(Ok(StorageResponse { - key: val.key.0, - value: result.0, - })) - }); - - Ok(StreamOf(Box::pin(storage_result_stream))) - }) - .await - } - - async fn genesis_hash(&self) -> Result, BackendError> { - retry(|| async { - let genesis_hash = self.methods.chainspec_v1_genesis_hash().await?; - Ok(genesis_hash) - }) - .await - } - - async fn block_header(&self, at: HashFor) -> Result, BackendError> { - retry(|| async { - let sub_id = get_subscription_id(&self.follow_handle).await?; - let header = self.methods.chainhead_v1_header(&sub_id, at).await?; - Ok(header) - }) - .await - } - - async fn block_body(&self, at: HashFor) -> Result>>, BackendError> { - retry(|| async { - let sub_id = 
get_subscription_id(&self.follow_handle).await?; - - // Subscribe to the body response and get our operationId back. - let follow_events = self.follow_handle.subscribe().events(); - let status = self.methods.chainhead_v1_body(&sub_id, at).await?; - let operation_id = match status { - MethodResponse::LimitReached => return Err(RpcError::LimitReached.into()), - MethodResponse::Started(s) => s.operation_id, - }; - - // Wait for the response to come back with the correct operationId. - let mut exts_stream = follow_events.filter_map(|ev| { - let FollowEvent::OperationBodyDone(body) = ev else { - return std::future::ready(None); - }; - if body.operation_id != operation_id { - return std::future::ready(None); - } - let exts: Vec<_> = body.value.into_iter().map(|ext| ext.0).collect(); - std::future::ready(Some(exts)) - }); - - Ok(exts_stream.next().await) - }) - .await - } - - async fn latest_finalized_block_ref(&self) -> Result>, BackendError> { - let next_ref: Option>> = self - .follow_handle - .subscribe() - .events() - .filter_map(|ev| { - let out = match ev { - FollowEvent::Initialized(init) => { - init.finalized_block_hashes.last().map(|b| b.clone().into()) - } - _ => None, - }; - std::future::ready(out) - }) - .next() - .await; - - next_ref.ok_or_else(|| RpcError::SubscriptionDropped.into()) - } - - async fn current_runtime_version(&self) -> Result { - // Just start a stream of version infos, and return the first value we get from it. - let runtime_version = self.stream_runtime_version().await?.next().await; - match runtime_version { - None => Err(BackendError::Rpc(RpcError::SubscriptionDropped)), - Some(Err(e)) => Err(e), - Some(Ok(version)) => Ok(version), - } - } - - async fn stream_runtime_version( - &self, - ) -> Result, BackendError> { - // Keep track of runtime details announced in new blocks, and then when blocks - // are finalized, find the latest of these that has runtime details, and clear the rest. 
- let mut runtimes = HashMap::new(); - let runtime_stream = self - .follow_handle - .subscribe() - .events() - .filter_map(move |ev| { - let output = match ev { - FollowEvent::Initialized(ev) => { - for finalized_block in ev.finalized_block_hashes { - runtimes.remove(&finalized_block.hash()); - } - ev.finalized_block_runtime - } - FollowEvent::NewBlock(ev) => { - if let Some(runtime) = ev.new_runtime { - runtimes.insert(ev.block_hash.hash(), runtime); - } - None - } - FollowEvent::Finalized(ev) => { - let next_runtime = { - let mut it = ev - .finalized_block_hashes - .iter() - .rev() - .filter_map(|h| runtimes.get(&h.hash()).cloned()) - .peekable(); - - let next = it.next(); - - if it.peek().is_some() { - tracing::warn!( - target: "subxt", - "Several runtime upgrades in the finalized blocks but only the latest runtime upgrade is returned" - ); - } - - next - }; - - // Remove finalized and pruned blocks as valid runtime upgrades. - for block in ev - .finalized_block_hashes - .iter() - .chain(ev.pruned_block_hashes.iter()) - { - runtimes.remove(&block.hash()); - } - - next_runtime - } - _ => None, - }; - - let runtime_event = match output { - None => return std::future::ready(None), - Some(ev) => ev, - }; - - let runtime_details = match runtime_event { - RuntimeEvent::Invalid(err) => { - return std::future::ready(Some(Err(BackendError::Other(format!("Invalid runtime error using chainHead RPCs: {}", err.error))))) - } - RuntimeEvent::Valid(ev) => ev, - }; - - let runtime_version = RuntimeVersion { - spec_version: runtime_details.spec.spec_version, - transaction_version: runtime_details.spec.transaction_version - }; - std::future::ready(Some(Ok(runtime_version))) - }); - - Ok(StreamOf::new(Box::pin(runtime_stream))) - } - - async fn stream_all_block_headers( - &self, - _hasher: T::Hasher, - ) -> Result>)>, BackendError> { - // TODO: https://github.com/paritytech/subxt/issues/1568 - // - // It's possible that blocks may be silently missed if - // a reconnection occurs 
because it's restarted by the unstable backend. - self.stream_headers(|ev| match ev { - FollowEvent::Initialized(init) => init.finalized_block_hashes, - FollowEvent::NewBlock(ev) => { - vec![ev.block_hash] - } - _ => vec![], - }) - .await - } - - async fn stream_best_block_headers( - &self, - _hasher: T::Hasher, - ) -> Result>)>, BackendError> { - // TODO: https://github.com/paritytech/subxt/issues/1568 - // - // It's possible that blocks may be silently missed if - // a reconnection occurs because it's restarted by the unstable backend. - self.stream_headers(|ev| match ev { - FollowEvent::Initialized(init) => init.finalized_block_hashes, - FollowEvent::BestBlockChanged(ev) => vec![ev.best_block_hash], - _ => vec![], - }) - .await - } - - async fn stream_finalized_block_headers( - &self, - _hasher: T::Hasher, - ) -> Result>)>, BackendError> { - self.stream_headers(|ev| match ev { - FollowEvent::Initialized(init) => init.finalized_block_hashes, - FollowEvent::Finalized(ev) => ev.finalized_block_hashes, - _ => vec![], - }) - .await - } - - async fn submit_transaction( - &self, - extrinsic: &[u8], - ) -> Result>>, BackendError> { - // Submit a transaction. This makes no attempt to sync with follow events, - async fn submit_transaction_ignoring_follow_events( - extrinsic: &[u8], - methods: &ChainHeadRpcMethods, - ) -> Result>>, BackendError> { - let tx_progress = methods - .transactionwatch_v1_submit_and_watch(extrinsic) - .await? 
- .map(|ev| { - ev.map(|tx_status| { - use subxt_rpcs::methods::chain_head::TransactionStatus as RpcTransactionStatus; - match tx_status { - RpcTransactionStatus::Validated => TransactionStatus::Validated, - RpcTransactionStatus::Broadcasted => TransactionStatus::Broadcasted, - RpcTransactionStatus::BestChainBlockIncluded { block: None } => { - TransactionStatus::NoLongerInBestBlock - }, - RpcTransactionStatus::BestChainBlockIncluded { block: Some(block) } => { - TransactionStatus::InBestBlock { hash: BlockRef::from_hash(block.hash) } - }, - RpcTransactionStatus::Finalized { block } => { - TransactionStatus::InFinalizedBlock { hash: BlockRef::from_hash(block.hash) } - }, - RpcTransactionStatus::Error { error } => { - TransactionStatus::Error { message: error } - }, - RpcTransactionStatus::Invalid { error } => { - TransactionStatus::Invalid { message: error } - }, - RpcTransactionStatus::Dropped { error } => { - TransactionStatus::Dropped { message: error } - }, - } - }).map_err(Into::into) - }); - - Ok(StreamOf(Box::pin(tx_progress))) - } - - // Submit a transaction. This synchronizes with chainHead_follow events to ensure - // that block hashes returned are ready to be queried. - async fn submit_transaction_tracking_follow_events( - extrinsic: &[u8], - transaction_timeout_secs: u64, - methods: &ChainHeadRpcMethods, - follow_handle: &FollowStreamDriverHandle>, - ) -> Result>>, BackendError> { - // We care about new and finalized block hashes. - enum SeenBlockMarker { - New, - Finalized, - } - - // First, subscribe to new blocks. - let mut seen_blocks_sub = follow_handle.subscribe().events(); - - // Then, submit the transaction. 
- let mut tx_progress = methods - .transactionwatch_v1_submit_and_watch(extrinsic) - .await?; - - let mut seen_blocks = HashMap::new(); - let mut done = false; - - // If we see the finalized event, we start waiting until we find a finalized block that - // matches, so we can guarantee to return a pinned block hash and be properly in sync - // with chainHead_follow. - let mut finalized_hash: Option> = None; - - // Record the start time so that we can time out if things appear to take too long. - let start_instant = web_time::Instant::now(); - - // A quick helper to return a generic error. - let err_other = |s: &str| Some(Err(BackendError::Other(s.into()))); - - // Now we can attempt to associate tx events with pinned blocks. - let tx_stream = futures::stream::poll_fn(move |cx| { - loop { - // Bail early if we're finished; nothing else to do. - if done { - return Poll::Ready(None); - } - - // Bail if we exceed 4 mins; something very likely went wrong. - if start_instant.elapsed().as_secs() > transaction_timeout_secs { - return Poll::Ready(err_other( - "Timeout waiting for the transaction to be finalized", - )); - } - - // Poll for a follow event, and error if the stream has unexpectedly ended. - let follow_ev_poll = match seen_blocks_sub.poll_next_unpin(cx) { - Poll::Ready(None) => { - return Poll::Ready(err_other( - "chainHead_follow stream ended unexpectedly", - )); - } - Poll::Ready(Some(follow_ev)) => Poll::Ready(follow_ev), - Poll::Pending => Poll::Pending, - }; - let follow_ev_is_pending = follow_ev_poll.is_pending(); - - // If there was a follow event, then handle it and loop around to see if there are more. - // We want to buffer follow events until we hit Pending, so that we are as up-to-date as possible - // for when we see a BestBlockChanged event, so that we have the best change of already having - // seen the block that it mentions and returning a proper pinned block. 
- if let Poll::Ready(follow_ev) = follow_ev_poll { - match follow_ev { - FollowEvent::NewBlock(ev) => { - // Optimization: once we have a `finalized_hash`, we only care about finalized - // block refs now and can avoid bothering to save new blocks. - if finalized_hash.is_none() { - seen_blocks.insert( - ev.block_hash.hash(), - (SeenBlockMarker::New, ev.block_hash), - ); - } - } - FollowEvent::Finalized(ev) => { - for block_ref in ev.finalized_block_hashes { - seen_blocks.insert( - block_ref.hash(), - (SeenBlockMarker::Finalized, block_ref), - ); - } - } - FollowEvent::Stop => { - // If we get this event, we'll lose all of our existing pinned blocks and have a gap - // in which we may lose the finalized block that the TX is in. For now, just error if - // this happens, to prevent the case in which we never see a finalized block and wait - // forever. - return Poll::Ready(err_other( - "chainHead_follow emitted 'stop' event during transaction submission", - )); - } - _ => {} - } - continue; - } - - // If we have a finalized hash, we are done looking for tx events and we are just waiting - // for a pinned block with a matching hash (which must appear eventually given it's finalized). - if let Some(hash) = &finalized_hash { - if let Some((SeenBlockMarker::Finalized, block_ref)) = - seen_blocks.remove(hash) - { - // Found it! Hand back the event with a pinned block. We're done. - done = true; - let ev = TransactionStatus::InFinalizedBlock { - hash: block_ref.into(), - }; - return Poll::Ready(Some(Ok(ev))); - } else { - // Not found it! If follow ev is pending, then return pending here and wait for - // a new one to come in, else loop around and see if we get another one immediately. - seen_blocks.clear(); - if follow_ev_is_pending { - return Poll::Pending; - } else { - continue; - } - } - } - - // If we don't have a finalized block yet, we keep polling for tx progress events. 
- let tx_progress_ev = match tx_progress.poll_next_unpin(cx) { - Poll::Pending => return Poll::Pending, - Poll::Ready(None) => { - return Poll::Ready(err_other( - "No more transaction progress events, but we haven't seen a Finalized one yet", - )); - } - Poll::Ready(Some(Err(e))) => return Poll::Ready(Some(Err(e.into()))), - Poll::Ready(Some(Ok(ev))) => ev, - }; - - // When we get one, map it to the correct format (or for finalized ev, wait for the pinned block): - use subxt_rpcs::methods::chain_head::TransactionStatus as RpcTransactionStatus; - let tx_progress_ev = match tx_progress_ev { - RpcTransactionStatus::Finalized { block } => { - // We'll wait until we have seen this hash, to try to guarantee - // that when we return this event, the corresponding block is - // pinned and accessible. - finalized_hash = Some(block.hash); - continue; - } - RpcTransactionStatus::BestChainBlockIncluded { block: Some(block) } => { - // Look up a pinned block ref if we can, else return a non-pinned - // block that likely isn't accessible. We have no guarantee that a best - // block on the node a tx was sent to will ever be known about on the - // chainHead_follow subscription. - let block_ref = match seen_blocks.get(&block.hash) { - Some((_, block_ref)) => block_ref.clone().into(), - None => BlockRef::from_hash(block.hash), - }; - TransactionStatus::InBestBlock { hash: block_ref } - } - RpcTransactionStatus::BestChainBlockIncluded { block: None } => { - TransactionStatus::NoLongerInBestBlock - } - RpcTransactionStatus::Broadcasted => TransactionStatus::Broadcasted, - RpcTransactionStatus::Dropped { error, .. 
} => { - TransactionStatus::Dropped { message: error } - } - RpcTransactionStatus::Error { error } => { - TransactionStatus::Error { message: error } - } - RpcTransactionStatus::Invalid { error } => { - TransactionStatus::Invalid { message: error } - } - RpcTransactionStatus::Validated => TransactionStatus::Validated, - }; - return Poll::Ready(Some(Ok(tx_progress_ev))); - } - }); - - Ok(StreamOf(Box::pin(tx_stream))) - } - - if self.submit_transactions_ignoring_follow_events { - submit_transaction_ignoring_follow_events(extrinsic, &self.methods).await - } else { - submit_transaction_tracking_follow_events::( - extrinsic, - self.transaction_timeout_secs as u64, - &self.methods, - &self.follow_handle, - ) - .await - } - } - - async fn call( - &self, - method: &str, - call_parameters: Option<&[u8]>, - at: HashFor, - ) -> Result, BackendError> { - retry(|| async { - let sub_id = get_subscription_id(&self.follow_handle).await?; - - // Subscribe to the body response and get our operationId back. - let follow_events = self.follow_handle.subscribe().events(); - let call_parameters = call_parameters.unwrap_or(&[]); - let status = self - .methods - .chainhead_v1_call(&sub_id, at, method, call_parameters) - .await?; - let operation_id = match status { - MethodResponse::LimitReached => return Err(RpcError::LimitReached.into()), - MethodResponse::Started(s) => s.operation_id, - }; - - // Wait for the response to come back with the correct operationId. - let mut call_data_stream = follow_events.filter_map(|ev| { - let FollowEvent::OperationCallDone(body) = ev else { - return std::future::ready(None); - }; - if body.operation_id != operation_id { - return std::future::ready(None); - } - std::future::ready(Some(body.output.0)) - }); - - call_data_stream - .next() - .await - .ok_or_else(|| RpcError::SubscriptionDropped.into()) - }) - .await - } -} - -/// A helper to obtain a subscription ID. 
-async fn get_subscription_id( - follow_handle: &FollowStreamDriverHandle, -) -> Result { - let Some(sub_id) = follow_handle.subscribe().subscription_id().await else { - return Err(RpcError::SubscriptionDropped.into()); - }; - - Ok(sub_id) -} diff --git a/subxt/src/backend/chain_head/storage_items.rs b/subxt/src/backend/chain_head/storage_items.rs index 6519e63a67..31cbea8c09 100644 --- a/subxt/src/backend/chain_head/storage_items.rs +++ b/subxt/src/backend/chain_head/storage_items.rs @@ -4,7 +4,7 @@ use super::follow_stream_driver::FollowStreamDriverHandle; use super::follow_stream_unpin::BlockRef; -use crate::config::{Config, HashFor}; +use crate::config::{Config, HashFor, RpcConfigFor}; use crate::error::{BackendError, RpcError}; use futures::{FutureExt, Stream, StreamExt}; use std::collections::VecDeque; @@ -35,7 +35,7 @@ impl StorageItems { queries: impl Iterator>, at: HashFor, follow_handle: &FollowStreamDriverHandle>, - methods: ChainHeadRpcMethods, + methods: ChainHeadRpcMethods>, ) -> Result { let sub_id = super::get_subscription_id(follow_handle).await?; @@ -157,7 +157,7 @@ impl Stream for StorageItems { FollowEvent::OperationError(err) if err.operation_id == *self.operation_id => { // Something went wrong obtaining storage items; mark as done and return the error. self.done = true; - return Poll::Ready(Some(Err(BackendError::Other(err.error)))); + return Poll::Ready(Some(Err(BackendError::other(err.error)))); } _ => { // We don't care about this event; wait for the next. diff --git a/new/src/backend/combined.rs b/subxt/src/backend/combined.rs similarity index 98% rename from new/src/backend/combined.rs rename to subxt/src/backend/combined.rs index 7870c37b5f..85ca7dc684 100644 --- a/new/src/backend/combined.rs +++ b/subxt/src/backend/combined.rs @@ -90,10 +90,15 @@ impl CombinedBackendBuilder { let rpc_client = rpc_client.into(); // What does the thing wer're talking to actually know about? 
- let methods: Vec = rpc_client + #[derive(serde::Deserialize)] + struct Methods { + methods: Vec, + } + let methods: Methods = rpc_client .request("rpc_methods", subxt_rpcs::rpc_params![]) .await .map_err(CombinedBackendError::CouldNotObtainRpcMethodList)?; + let methods = methods.methods; let has_archive_methods = methods.iter().any(|m| m.starts_with("archive_v1_")); let has_chainhead_methods = methods.iter().any(|m| m.starts_with("chainHead_v1")); diff --git a/subxt/src/backend/legacy.rs b/subxt/src/backend/legacy.rs index d6edcdbd7d..d7aadf4989 100644 --- a/subxt/src/backend/legacy.rs +++ b/subxt/src/backend/legacy.rs @@ -5,29 +5,22 @@ //! This module exposes a legacy backend implementation, which relies //! on the legacy RPC API methods. -use self::rpc_methods::TransactionStatus as RpcTransactionStatus; +mod descendant_streams; + use crate::backend::utils::{retry, retry_stream}; use crate::backend::{ - Backend, BlockRef, RuntimeVersion, StorageResponse, StreamOf, StreamOfResults, - TransactionStatus, + Backend, BlockRef, StorageResponse, StreamOf, StreamOfResults, TransactionStatus, }; -use crate::config::{Config, HashFor, Header}; +use crate::config::{Config, HashFor, Hasher, Header, RpcConfigFor}; use crate::error::BackendError; use async_trait::async_trait; +use codec::Encode; +use descendant_streams::{StorageFetchDescendantKeysStream, StorageFetchDescendantValuesStream}; use futures::TryStreamExt; -use futures::{Future, FutureExt, Stream, StreamExt, future, future::Either, stream}; -use std::collections::VecDeque; -use std::pin::Pin; -use std::task::{Context, Poll}; +use futures::{Future, Stream, StreamExt, future, future::Either, stream}; use subxt_rpcs::RpcClient; - -/// Re-export legacy RPC types and methods from [`subxt_rpcs::methods::legacy`]. -pub mod rpc_methods { - pub use subxt_rpcs::methods::legacy::*; -} - -// Expose the RPC methods. 
-pub use rpc_methods::LegacyRpcMethods; +use subxt_rpcs::methods::legacy::NumberOrHex; +use subxt_rpcs::methods::legacy::{LegacyRpcMethods, TransactionStatus as RpcTransactionStatus}; /// Configure and build an [`LegacyBackend`]. pub struct LegacyBackendBuilder { @@ -72,7 +65,7 @@ impl LegacyBackendBuilder { #[derive(Debug)] pub struct LegacyBackend { storage_page_size: u32, - methods: LegacyRpcMethods, + methods: LegacyRpcMethods>, } impl Clone for LegacyBackend { @@ -94,7 +87,7 @@ impl LegacyBackend { impl super::sealed::Sealed for LegacyBackend {} #[async_trait] -impl Backend for LegacyBackend { +impl Backend for LegacyBackend { async fn storage_fetch_values( &self, keys: Vec>, @@ -103,7 +96,7 @@ impl Backend for LegacyBackend { fn get_entry( key: Vec, at: HashFor, - methods: LegacyRpcMethods, + methods: LegacyRpcMethods>, ) -> impl Future, BackendError>> { retry(move || { let methods = methods.clone(); @@ -137,15 +130,12 @@ impl Backend for LegacyBackend { key: Vec, at: HashFor, ) -> Result>, BackendError> { - let keys = StorageFetchDescendantKeysStream { - at, + let keys = StorageFetchDescendantKeysStream::new( + self.methods.clone(), key, - storage_page_size: self.storage_page_size, - methods: self.methods.clone(), - done: Default::default(), - keys_fut: Default::default(), - pagination_start_key: None, - }; + at, + self.storage_page_size, + ); let keys = keys.flat_map(|keys| { match keys { @@ -168,21 +158,14 @@ impl Backend for LegacyBackend { key: Vec, at: HashFor, ) -> Result, BackendError> { - let keys_stream = StorageFetchDescendantKeysStream { - at, + let values_stream = StorageFetchDescendantValuesStream::new( + self.methods.clone(), key, - storage_page_size: self.storage_page_size, - methods: self.methods.clone(), - done: Default::default(), - keys_fut: Default::default(), - pagination_start_key: None, - }; + at, + self.storage_page_size, + ); - Ok(StreamOf(Box::pin(StorageFetchDescendantValuesStream { - keys: keys_stream, - results_fut: None, - 
results: Default::default(), - }))) + Ok(StreamOf(Box::pin(values_stream))) } async fn genesis_hash(&self) -> Result, BackendError> { @@ -193,6 +176,22 @@ impl Backend for LegacyBackend { .await } + async fn block_number_to_hash( + &self, + number: u64, + ) -> Result>>, BackendError> { + retry(|| async { + let number_or_hash = NumberOrHex::Number(number); + let hash = self + .methods + .chain_get_block_hash(Some(number_or_hash)) + .await? + .map(BlockRef::from_hash); + Ok(hash) + }) + .await + } + async fn block_header(&self, at: HashFor) -> Result, BackendError> { retry(|| async { let header = self.methods.chain_get_header(Some(at)).await?; @@ -221,56 +220,6 @@ impl Backend for LegacyBackend { .await } - async fn current_runtime_version(&self) -> Result { - retry(|| async { - let details = self.methods.state_get_runtime_version(None).await?; - Ok(RuntimeVersion { - spec_version: details.spec_version, - transaction_version: details.transaction_version, - }) - }) - .await - } - - async fn stream_runtime_version( - &self, - ) -> Result, BackendError> { - let methods = self.methods.clone(); - - let retry_sub = retry_stream(move || { - let methods = methods.clone(); - - Box::pin(async move { - let sub = methods.state_subscribe_runtime_version().await?; - let sub = sub.map_err(|e| e.into()).map(|r| { - r.map(|v| RuntimeVersion { - spec_version: v.spec_version, - transaction_version: v.transaction_version, - }) - }); - Ok(StreamOf(Box::pin(sub))) - }) - }) - .await?; - - // For runtime version subscriptions we omit the `DisconnectedWillReconnect` error - // because the once it resubscribes it will emit the latest runtime version. - // - // Thus, it's technically possible that a runtime version can be missed if - // two runtime upgrades happen in quick succession, but this is very unlikely. 
- let stream = retry_sub.filter(|r| { - let mut keep = true; - if let Err(e) = r { - if e.is_disconnected_will_reconnect() { - keep = false; - } - } - async move { keep } - }); - - Ok(StreamOf(Box::pin(stream))) - } - async fn stream_all_block_headers( &self, hasher: T::Hasher, @@ -278,11 +227,12 @@ impl Backend for LegacyBackend { let methods = self.methods.clone(); let retry_sub = retry_stream(move || { let methods = methods.clone(); + let hasher = hasher.clone(); Box::pin(async move { let sub = methods.chain_subscribe_all_heads().await?; let sub = sub.map_err(|e| e.into()).map(move |r| { r.map(|h| { - let hash = h.hash_with(hasher); + let hash = hasher.hash(&h.encode()); (h, BlockRef::from_hash(hash)) }) }); @@ -302,11 +252,12 @@ impl Backend for LegacyBackend { let retry_sub = retry_stream(move || { let methods = methods.clone(); + let hasher = hasher.clone(); Box::pin(async move { let sub = methods.chain_subscribe_new_heads().await?; let sub = sub.map_err(|e| e.into()).map(move |r| { r.map(|h| { - let hash = h.hash_with(hasher); + let hash = hasher.hash(&h.encode()); (h, BlockRef::from_hash(hash)) }) }); @@ -326,6 +277,7 @@ impl Backend for LegacyBackend { let retry_sub = retry_stream(move || { let this = this.clone(); + let hasher = hasher.clone(); Box::pin(async move { let sub = this.methods.chain_subscribe_finalized_heads().await?; @@ -345,7 +297,7 @@ impl Backend for LegacyBackend { ); let sub = sub.map(move |r| { r.map(|h| { - let hash = h.hash_with(hasher); + let hash = hasher.hash(&h.encode()); (h, BlockRef::from_hash(hash)) }) }); @@ -439,7 +391,7 @@ impl Backend for LegacyBackend { /// without notice in a patch release. 
#[doc(hidden)] pub fn subscribe_to_block_headers_filling_in_gaps( - methods: LegacyRpcMethods, + methods: LegacyRpcMethods>, sub: S, mut last_block_num: Option, ) -> impl Stream> + Send @@ -482,181 +434,3 @@ where Either::Right(previous_headers.chain(stream::once(async { Ok(header) }))) }) } - -/// This provides a stream of values given some prefix `key`. It -/// internally manages pagination and such. -#[allow(clippy::type_complexity)] -pub struct StorageFetchDescendantKeysStream { - methods: LegacyRpcMethods, - key: Vec, - at: HashFor, - // How many entries to ask for each time. - storage_page_size: u32, - // What key do we start paginating from? None = from the beginning. - pagination_start_key: Option>, - // Keys, future and cached: - keys_fut: - Option>, BackendError>> + Send + 'static>>>, - // Set to true when we're done: - done: bool, -} - -impl std::marker::Unpin for StorageFetchDescendantKeysStream {} - -impl Stream for StorageFetchDescendantKeysStream { - type Item = Result>, BackendError>; - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.as_mut(); - loop { - // We're already done. - if this.done { - return Poll::Ready(None); - } - - // Poll future to fetch next keys. - if let Some(mut keys_fut) = this.keys_fut.take() { - let Poll::Ready(keys) = keys_fut.poll_unpin(cx) else { - this.keys_fut = Some(keys_fut); - return Poll::Pending; - }; - - match keys { - Ok(mut keys) => { - if this.pagination_start_key.is_some() - && keys.first() == this.pagination_start_key.as_ref() - { - // Currently, Smoldot returns the "start key" as the first key in the input - // (see https://github.com/smol-dot/smoldot/issues/1692), whereas Substrate doesn't. - // We don't expect the start key to be returned either (since it was the last key of prev - // iteration), so remove it if we see it. This `remove()` method isn't very efficient but - // this will be a non issue with the RPC V2 APIs or if Smoldot aligns with Substrate anyway. 
- keys.remove(0); - } - if keys.is_empty() { - // No keys left; we're done! - this.done = true; - return Poll::Ready(None); - } - // The last key is where we want to paginate from next time. - this.pagination_start_key = keys.last().cloned(); - // return all of the keys from this run. - return Poll::Ready(Some(Ok(keys))); - } - Err(e) => { - if e.is_disconnected_will_reconnect() { - this.keys_fut = Some(keys_fut); - continue; - } - - // Error getting keys? Return it. - return Poll::Ready(Some(Err(e))); - } - } - } - - // Else, we don't have a fut to get keys yet so start one going. - let methods = this.methods.clone(); - let key = this.key.clone(); - let at = this.at; - let storage_page_size = this.storage_page_size; - let pagination_start_key = this.pagination_start_key.clone(); - let keys_fut = async move { - let keys = methods - .state_get_keys_paged( - &key, - storage_page_size, - pagination_start_key.as_deref(), - Some(at), - ) - .await?; - Ok(keys) - }; - this.keys_fut = Some(Box::pin(keys_fut)); - } - } -} - -/// This provides a stream of values given some stream of keys. -#[allow(clippy::type_complexity)] -pub struct StorageFetchDescendantValuesStream { - // Stream of keys. 
- keys: StorageFetchDescendantKeysStream, - // Then we track the future to get the values back for each key: - results_fut: Option< - Pin< - Box< - dyn Future, Vec)>>, BackendError>> - + Send - + 'static, - >, - >, - >, - // And finally we return each result back one at a time: - results: VecDeque<(Vec, Vec)>, -} - -impl Stream for StorageFetchDescendantValuesStream { - type Item = Result; - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let mut this = self.as_mut(); - loop { - // If we have results back, return them one by one - if let Some((key, value)) = this.results.pop_front() { - let res = StorageResponse { key, value }; - return Poll::Ready(Some(Ok(res))); - } - - // If we're waiting on the next results then poll that future: - if let Some(mut results_fut) = this.results_fut.take() { - match results_fut.poll_unpin(cx) { - Poll::Ready(Ok(Some(results))) => { - this.results = results; - continue; - } - Poll::Ready(Ok(None)) => { - // No values back for some keys? Skip. 
- continue; - } - Poll::Ready(Err(e)) => return Poll::Ready(Some(Err(e))), - Poll::Pending => { - this.results_fut = Some(results_fut); - return Poll::Pending; - } - } - } - - match this.keys.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(keys))) => { - let methods = this.keys.methods.clone(); - let at = this.keys.at; - let results_fut = async move { - let keys = keys.iter().map(|k| &**k); - let values = retry(|| async { - let res = methods - .state_query_storage_at(keys.clone(), Some(at)) - .await?; - Ok(res) - }) - .await?; - let values: VecDeque<_> = values - .into_iter() - .flat_map(|v| { - v.changes.into_iter().filter_map(|(k, v)| { - let v = v?; - Some((k.0, v.0)) - }) - }) - .collect(); - Ok(Some(values)) - }; - - this.results_fut = Some(Box::pin(results_fut)); - continue; - } - Poll::Ready(Some(Err(e))) => return Poll::Ready(Some(Err(e))), - Poll::Ready(None) => return Poll::Ready(None), - Poll::Pending => return Poll::Pending, - } - } - } -} diff --git a/new/src/backend/legacy/descendant_streams.rs b/subxt/src/backend/legacy/descendant_streams.rs similarity index 100% rename from new/src/backend/legacy/descendant_streams.rs rename to subxt/src/backend/legacy/descendant_streams.rs diff --git a/subxt/src/backend/mod.rs b/subxt/src/backend/mod.rs deleted file mode 100644 index 95395a2a29..0000000000 --- a/subxt/src/backend/mod.rs +++ /dev/null @@ -1,1072 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! This module exposes a backend trait for Subxt which allows us to get and set -//! the necessary information (probably from a JSON-RPC API, but that's up to the -//! implementation). 
- -pub mod chain_head; -pub mod legacy; -pub mod utils; - -use crate::config::{Config, HashFor}; -use crate::error::BackendError; -use async_trait::async_trait; -use codec::{Decode, Encode}; -use futures::{Stream, StreamExt}; -use std::pin::Pin; -use std::sync::Arc; -use subxt_core::client::RuntimeVersion; -use subxt_metadata::Metadata; - -/// Some re-exports from the [`subxt_rpcs`] crate, also accessible in full via [`crate::ext::subxt_rpcs`]. -pub mod rpc { - pub use subxt_rpcs::client::{RawRpcFuture, RawRpcSubscription, RawValue, RpcParams}; - pub use subxt_rpcs::{RpcClient, RpcClientT, rpc_params}; - - crate::macros::cfg_reconnecting_rpc_client! { - /// An RPC client that automatically reconnects. - /// - /// # Example - /// - /// ```rust,no_run,standalone_crate - /// use std::time::Duration; - /// use futures::StreamExt; - /// use subxt::backend::rpc::reconnecting_rpc_client::{RpcClient, ExponentialBackoff}; - /// use subxt::{OnlineClient, PolkadotConfig}; - /// - /// #[tokio::main] - /// async fn main() { - /// let rpc = RpcClient::builder() - /// .retry_policy(ExponentialBackoff::from_millis(100).max_delay(Duration::from_secs(10))) - /// .build("ws://localhost:9944".to_string()) - /// .await - /// .unwrap(); - /// - /// let subxt_client: OnlineClient = OnlineClient::from_rpc_client(rpc.clone()).await.unwrap(); - /// let mut blocks_sub = subxt_client.blocks().subscribe_finalized().await.unwrap(); - /// - /// while let Some(block) = blocks_sub.next().await { - /// let block = match block { - /// Ok(b) => b, - /// Err(e) => { - /// if e.is_disconnected_will_reconnect() { - /// println!("The RPC connection was lost and we may have missed a few blocks"); - /// continue; - /// } else { - /// panic!("Error: {}", e); - /// } - /// } - /// }; - /// println!("Block #{} ({})", block.number(), block.hash()); - /// } - /// } - /// ``` - pub use subxt_rpcs::client::reconnecting_rpc_client; - } -} - -/// Prevent the backend trait being implemented externally. 
-#[doc(hidden)] -pub(crate) mod sealed { - pub trait Sealed {} -} - -/// This trait exposes the interface that Subxt will use to communicate with -/// a backend. Its goal is to be as minimal as possible. -#[async_trait] -pub trait Backend: sealed::Sealed + Send + Sync + 'static { - /// Fetch values from storage. - async fn storage_fetch_values( - &self, - keys: Vec>, - at: HashFor, - ) -> Result, BackendError>; - - /// Fetch keys underneath the given key from storage. - async fn storage_fetch_descendant_keys( - &self, - key: Vec, - at: HashFor, - ) -> Result>, BackendError>; - - /// Fetch values underneath the given key from storage. - async fn storage_fetch_descendant_values( - &self, - key: Vec, - at: HashFor, - ) -> Result, BackendError>; - - /// Fetch the genesis hash - async fn genesis_hash(&self) -> Result, BackendError>; - - /// Get a block header - async fn block_header(&self, at: HashFor) -> Result, BackendError>; - - /// Return the extrinsics found in the block. Each extrinsic is represented - /// by a vector of bytes which has _not_ been SCALE decoded (in other words, the - /// first bytes in the vector will decode to the compact encoded length of the extrinsic) - async fn block_body(&self, at: HashFor) -> Result>>, BackendError>; - - /// Get the most recent finalized block hash. - /// Note: needed only in blocks client for finalized block stream; can prolly be removed. - async fn latest_finalized_block_ref(&self) -> Result>, BackendError>; - - /// Get information about the current runtime. - async fn current_runtime_version(&self) -> Result; - - /// A stream of all new runtime versions as they occur. - async fn stream_runtime_version(&self) - -> Result, BackendError>; - - /// A stream of all new block headers as they arrive. - async fn stream_all_block_headers( - &self, - hasher: T::Hasher, - ) -> Result>)>, BackendError>; - - /// A stream of best block headers. 
- async fn stream_best_block_headers( - &self, - hasher: T::Hasher, - ) -> Result>)>, BackendError>; - - /// A stream of finalized block headers. - async fn stream_finalized_block_headers( - &self, - hasher: T::Hasher, - ) -> Result>)>, BackendError>; - - /// Submit a transaction. This will return a stream of events about it. - async fn submit_transaction( - &self, - bytes: &[u8], - ) -> Result>>, BackendError>; - - /// Make a call to some runtime API. - async fn call( - &self, - method: &str, - call_parameters: Option<&[u8]>, - at: HashFor, - ) -> Result, BackendError>; -} - -/// helpful utility methods derived from those provided on [`Backend`] -#[async_trait] -pub trait BackendExt: Backend { - /// Fetch a single value from storage. - async fn storage_fetch_value( - &self, - key: Vec, - at: HashFor, - ) -> Result>, BackendError> { - self.storage_fetch_values(vec![key], at) - .await? - .next() - .await - .transpose() - .map(|o| o.map(|s| s.value)) - } - - /// The same as a [`Backend::call()`], but it will also attempt to decode the - /// result into the given type, which is a fairly common operation. - async fn call_decoding( - &self, - method: &str, - call_parameters: Option<&[u8]>, - at: HashFor, - ) -> Result { - let bytes = self.call(method, call_parameters, at).await?; - let res = - D::decode(&mut &*bytes).map_err(BackendError::CouldNotScaleDecodeRuntimeResponse)?; - Ok(res) - } - - /// Return the metadata at some version. - async fn metadata_at_version( - &self, - version: u32, - at: HashFor, - ) -> Result { - let param = version.encode(); - - let opaque: Option = self - .call_decoding("Metadata_metadata_at_version", Some(¶m), at) - .await?; - let Some(opaque) = opaque else { - return Err(BackendError::MetadataVersionNotFound(version)); - }; - - let metadata: Metadata = - Decode::decode(&mut &opaque.0[..]).map_err(BackendError::CouldNotDecodeMetadata)?; - Ok(metadata) - } - - /// Return V14 metadata from the legacy `Metadata_metadata` call. 
- async fn legacy_metadata(&self, at: HashFor) -> Result { - let opaque: frame_metadata::OpaqueMetadata = - self.call_decoding("Metadata_metadata", None, at).await?; - let metadata: Metadata = - Decode::decode(&mut &opaque.0[..]).map_err(BackendError::CouldNotDecodeMetadata)?; - Ok(metadata) - } -} - -#[async_trait] -impl + ?Sized, T: Config> BackendExt for B {} - -/// An opaque struct which, while alive, indicates that some references to a block -/// still exist. This gives the backend the opportunity to keep the corresponding block -/// details around for a while if it likes and is able to. No guarantees can be made about -/// how long the corresponding details might be available for, but if no references to a block -/// exist, then the backend is free to discard any details for it. -#[derive(Clone)] -pub struct BlockRef { - hash: H, - // We keep this around so that when it is dropped, it has the - // opportunity to tell the backend. - _pointer: Option>, -} - -impl From for BlockRef { - fn from(value: H) -> Self { - BlockRef::from_hash(value) - } -} - -impl PartialEq for BlockRef { - fn eq(&self, other: &Self) -> bool { - self.hash == other.hash - } -} -impl Eq for BlockRef {} - -// Manual implementation to work around https://github.com/mcarton/rust-derivative/issues/115. -impl PartialOrd for BlockRef { - fn partial_cmp(&self, other: &Self) -> Option { - self.hash.partial_cmp(&other.hash) - } -} - -impl Ord for BlockRef { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.hash.cmp(&other.hash) - } -} - -impl std::fmt::Debug for BlockRef { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_tuple("BlockRef").field(&self.hash).finish() - } -} - -impl std::hash::Hash for BlockRef { - fn hash(&self, state: &mut Hasher) { - self.hash.hash(state); - } -} - -impl BlockRef { - /// A [`BlockRef`] that doesn't reference a given block, but does have an associated hash. 
- /// This is used in the legacy backend, which has no notion of pinning blocks. - pub fn from_hash(hash: H) -> Self { - Self { - hash, - _pointer: None, - } - } - /// Construct a [`BlockRef`] from an instance of the underlying trait. It's expected - /// that the [`Backend`] implementation will call this if it wants to track which blocks - /// are potentially in use. - pub fn new(hash: H, inner: P) -> Self { - Self { - hash, - _pointer: Some(Arc::new(inner)), - } - } - - /// Return the hash of the referenced block. - pub fn hash(&self) -> H - where - H: Copy, - { - self.hash - } -} - -/// A trait that a [`Backend`] can implement to know when some block -/// can be unpinned: when this is dropped, there are no remaining references -/// to the block that it's associated with. -pub trait BlockRefT: Send + Sync + 'static {} - -/// A stream of some item. -pub struct StreamOf(Pin + Send + 'static>>); - -impl Stream for StreamOf { - type Item = T; - fn poll_next( - mut self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - self.0.poll_next_unpin(cx) - } -} - -impl std::fmt::Debug for StreamOf { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_tuple("StreamOf").field(&"").finish() - } -} - -impl StreamOf { - /// Construct a new stream. - pub fn new(inner: Pin + Send + 'static>>) -> Self { - StreamOf(inner) - } - - /// Returns the next item in the stream. This is just a wrapper around - /// [`StreamExt::next()`] so that you can avoid the extra import. - pub async fn next(&mut self) -> Option { - StreamExt::next(self).await - } -} - -/// A stream of [`Result`]. -pub type StreamOfResults = StreamOf>; - -/// The status of the transaction. -/// -/// If the status is [`TransactionStatus::InFinalizedBlock`], [`TransactionStatus::Error`], -/// [`TransactionStatus::Invalid`] or [`TransactionStatus::Dropped`], then no future -/// events will be emitted. 
-#[derive(Debug, Clone, PartialEq, Eq)] -pub enum TransactionStatus { - /// Transaction is part of the future queue. - Validated, - /// The transaction has been broadcast to other nodes. - Broadcasted, - /// Transaction is no longer in a best block. - NoLongerInBestBlock, - /// Transaction has been included in block with given hash. - InBestBlock { - /// Block hash the transaction is in. - hash: BlockRef, - }, - /// Transaction has been finalized by a finality-gadget, e.g GRANDPA - InFinalizedBlock { - /// Block hash the transaction is in. - hash: BlockRef, - }, - /// Something went wrong in the node. - Error { - /// Human readable message; what went wrong. - message: String, - }, - /// Transaction is invalid (bad nonce, signature etc). - Invalid { - /// Human readable message; why was it invalid. - message: String, - }, - /// The transaction was dropped. - Dropped { - /// Human readable message; why was it dropped. - message: String, - }, -} - -/// A response from calls like [`Backend::storage_fetch_values`] or -/// [`Backend::storage_fetch_descendant_values`]. -#[derive(serde::Serialize, serde::Deserialize, Clone, PartialEq, Debug)] -pub struct StorageResponse { - /// The key. - pub key: Vec, - /// The associated value. 
- pub value: Vec, -} - -#[cfg(test)] -mod test { - use super::*; - use crate::backend::StorageResponse; - use core::convert::Infallible; - use futures::StreamExt; - use primitive_types::H256; - use rpc::RpcClientT; - use std::collections::{HashMap, VecDeque}; - use subxt_core::{Config, config::DefaultExtrinsicParams}; - use subxt_rpcs::client::{ - MockRpcClient, - mock_rpc_client::{Json, MockRpcClientBuilder}, - }; - - fn random_hash() -> H256 { - H256::random() - } - - fn disconnected_will_reconnect() -> subxt_rpcs::Error { - subxt_rpcs::Error::DisconnectedWillReconnect("..".into()) - } - - fn storage_response>, V: Into>>(key: K, value: V) -> StorageResponse - where - Vec: From, - { - StorageResponse { - key: key.into(), - value: value.into(), - } - } - - // Define dummy config - enum Conf {} - impl Config for Conf { - type AccountId = crate::utils::AccountId32; - type Address = crate::utils::MultiAddress; - type Signature = crate::utils::MultiSignature; - type Hasher = crate::config::substrate::BlakeTwo256; - type Header = crate::config::substrate::SubstrateHeader; - type ExtrinsicParams = DefaultExtrinsicParams; - type AssetId = u32; - } - - mod legacy { - use super::*; - use crate::{ - backend::legacy::{LegacyBackend, rpc_methods::RuntimeVersion}, - error::RpcError, - }; - - use crate::backend::Backend; - - fn client_runtime_version(num: u32) -> crate::client::RuntimeVersion { - crate::client::RuntimeVersion { - spec_version: num, - transaction_version: num, - } - } - - fn runtime_version(num: u32) -> RuntimeVersion { - RuntimeVersion { - spec_version: num, - transaction_version: num, - other: HashMap::new(), - } - } - - #[tokio::test] - async fn storage_fetch_values() { - // Map from storage key to responses, given out in order, when that key is requested. 
- let mut values: HashMap<&str, VecDeque<_>> = HashMap::from_iter([ - ( - "ID1", - VecDeque::from_iter([ - Err(disconnected_will_reconnect()), - Ok(Json(hex::encode("Data1"))), - ]), - ), - ( - "ID2", - VecDeque::from_iter([ - Err(disconnected_will_reconnect()), - Ok(Json(hex::encode("Data2"))), - ]), - ), - ("ID3", VecDeque::from_iter([Ok(Json(hex::encode("Data3")))])), - ]); - - let rpc_client = MockRpcClient::builder() - .method_handler("state_getStorage", move |params| { - // Decode the storage key as first item from sequence of params: - let params = params.map(|p| p.get().to_string()); - let rpc_params = jsonrpsee::types::Params::new(params.as_deref()); - let key: sp_core::Bytes = rpc_params.sequence().next().unwrap(); - let key = std::str::from_utf8(&key.0).unwrap(); - // Fetch the response to use from our map, popping it from the front. - let values = values.get_mut(key).unwrap(); - let value = values.pop_front().unwrap(); - async move { value } - }) - .build(); - - // Test - let backend: LegacyBackend = LegacyBackend::builder().build(rpc_client); - - let response = backend - .storage_fetch_values( - ["ID1".into(), "ID2".into(), "ID3".into()].into(), - random_hash(), - ) - .await - .unwrap(); - - let response = response - .map(|x| x.unwrap()) - .collect::>() - .await; - - let expected = vec![ - storage_response("ID1", "Data1"), - storage_response("ID2", "Data2"), - storage_response("ID3", "Data3"), - ]; - - assert_eq!(expected, response) - } - - #[tokio::test] - async fn storage_fetch_value() { - let rpc_client = MockRpcClient::builder() - .method_handler_once("state_getStorage", async move |_params| { - // Return "disconnected" error on first call - Err::(disconnected_will_reconnect()) - }) - .method_handler_once("state_getStorage", async move |_param| { - // Return some hex encoded storage value on the next one - Json(hex::encode("Data1")) - }) - .build(); - - // Test - let backend: LegacyBackend = LegacyBackend::builder().build(rpc_client); - let 
response = backend - .storage_fetch_value("ID1".into(), random_hash()) - .await - .unwrap(); - - let response = response.unwrap(); - assert_eq!("Data1".to_owned(), String::from_utf8(response).unwrap()) - } - - /// This test should cover the logic of the following methods: - /// - `genesis_hash` - /// - `block_header` - /// - `block_body` - /// - `latest_finalized_block` - /// - `current_runtime_version` - /// - `current_runtime_version` - /// - `call` - /// The test covers them because they follow the simple pattern of: - /// ```rust,no_run,standalone_crate - /// async fn THE_THING(&self) -> Result, BackendError> { - /// retry(|| ).await - /// } - /// ``` - #[tokio::test] - async fn simple_fetch() { - let hash = random_hash(); - let rpc_client = MockRpcClient::builder() - .method_handler_once("chain_getBlockHash", async move |_params| { - // Return "disconnected" error on first call - Err::(disconnected_will_reconnect()) - }) - .method_handler_once("chain_getBlockHash", async move |_params| { - // Return the blockhash on next call - Json(hash) - }) - .build(); - - // Test - let backend: LegacyBackend = LegacyBackend::builder().build(rpc_client); - let response = backend.genesis_hash().await.unwrap(); - - assert_eq!(hash, response) - } - - /// This test should cover the logic of the following methods: - /// - `stream_runtime_version` - /// - `stream_all_block_headers` - /// - `stream_best_block_headers` - /// The test covers them because they follow the simple pattern of: - /// ```rust,no_run,standalone_crate - /// async fn stream_the_thing( - /// &self, - /// ) -> Result>)>, BackendError> { - /// let methods = self.methods.clone(); - /// let retry_sub = retry_stream(move || { - /// let methods = methods.clone(); - /// Box::pin(async move { - /// methods.do_the_thing().await? 
- /// }); - /// Ok(StreamOf(Box::pin(sub))) - /// }) - /// }) - /// .await?; - /// Ok(retry_sub) - /// } - /// ``` - #[tokio::test] - async fn stream_simple() { - // Each time the subscription is called, it will pop the first set - // of values from this and return them one after the other. - let mut data = VecDeque::from_iter([ - vec![ - Ok(Json(runtime_version(0))), - Err(disconnected_will_reconnect()), - Ok(Json(runtime_version(1))), - ], - vec![ - Err(disconnected_will_reconnect()), - Ok(Json(runtime_version(2))), - Ok(Json(runtime_version(3))), - ], - vec![ - Ok(Json(runtime_version(4))), - Ok(Json(runtime_version(5))), - Err(subxt_rpcs::Error::Client("..".into())), - ], - ]); - - let rpc_client = MockRpcClient::builder() - .subscription_handler("state_subscribeRuntimeVersion", move |_params, _unsub| { - let res = data.pop_front().unwrap(); - async move { res } - }) - .build(); - - // Test - let backend: LegacyBackend = LegacyBackend::builder().build(rpc_client); - let mut results = backend.stream_runtime_version().await.unwrap(); - - assert_eq!( - results.next().await.unwrap().unwrap(), - client_runtime_version(0) - ); - assert_eq!( - results.next().await.unwrap().unwrap(), - client_runtime_version(4) - ); - assert_eq!( - results.next().await.unwrap().unwrap(), - client_runtime_version(5) - ); - assert!(matches!( - results.next().await.unwrap(), - Err(BackendError::Rpc(RpcError::ClientError( - subxt_rpcs::Error::Client(_) - ))) - )); - assert!(results.next().await.is_none()); - } - } - - mod unstable_backend { - use subxt_rpcs::methods::chain_head::{ - self, Bytes, Initialized, MethodResponse, MethodResponseStarted, OperationError, - OperationId, OperationStorageItems, RuntimeSpec, RuntimeVersionEvent, - }; - use tokio::select; - - use super::chain_head::*; - use super::*; - - fn build_backend( - rpc_client: impl RpcClientT, - ) -> (ChainHeadBackend, ChainHeadBackendDriver) { - let (backend, driver): (ChainHeadBackend, _) = - 
ChainHeadBackend::builder().build(rpc_client); - (backend, driver) - } - - fn build_backend_spawn_background(rpc_client: impl RpcClientT) -> ChainHeadBackend { - ChainHeadBackend::builder().build_with_background_driver(rpc_client) - } - - fn runtime_spec() -> RuntimeSpec { - let spec = serde_json::json!({ - "specName": "westend", - "implName": "parity-westend", - "specVersion": 9122, - "implVersion": 0, - "transactionVersion": 7, - "apis": { - "0xdf6acb689907609b": 3, - "0x37e397fc7c91f5e4": 1, - "0x40fe3ad401f8959a": 5, - "0xd2bc9897eed08f15": 3, - "0xf78b278be53f454c": 2, - "0xaf2c0297a23e6d3d": 1, - "0x49eaaf1b548a0cb0": 1, - "0x91d5df18b0d2cf58": 1, - "0xed99c5acb25eedf5": 3, - "0xcbca25e39f142387": 2, - "0x687ad44ad37f03c2": 1, - "0xab3c0572291feb8b": 1, - "0xbc9d89904f5b923f": 1, - "0x37c8bb1350a9a2a8": 1 - } - }); - serde_json::from_value(spec).expect("Mock runtime spec should be the right shape") - } - - type FollowEvent = chain_head::FollowEvent>; - - /// Build a mock client which can handle `chainHead_v1_follow` subscriptions. - /// Messages from the provided receiver are sent to the latest active subscription. - fn mock_client_builder( - recv: tokio::sync::mpsc::UnboundedReceiver, - ) -> MockRpcClientBuilder { - mock_client_builder_with_ids(recv, 0..) - } - - fn mock_client_builder_with_ids( - recv: tokio::sync::mpsc::UnboundedReceiver, - ids: I, - ) -> MockRpcClientBuilder - where - I: IntoIterator + Send, - I::IntoIter: Send + Sync + 'static, - { - use subxt_rpcs::client::mock_rpc_client::AndThen; - use subxt_rpcs::{Error, UserError}; - - let recv = Arc::new(tokio::sync::Mutex::new(recv)); - let mut ids = ids.into_iter(); - - MockRpcClient::builder().subscription_handler( - "chainHead_v1_follow", - move |_params, _unsub| { - let recv = recv.clone(); - let id = ids.next(); - - // For each new follow subscription, we take messages from `recv` and pipe them to the output - // for the subscription (after an Initialized event). 
if the output is dropped/closed, we stop pulling - // messages from `recv`, waiting for a new chainHEad_v1_follow subscription. - let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); - tokio::spawn(async move { - let mut recv_guard = recv.lock().await; - loop { - select! { - // Channel closed, so stop pulling from `recv`. - _ = tx.closed() => { - break - }, - // Relay messages from `recv` unless some error sending. - Some(msg) = recv_guard.recv() => { - if tx.send(Json(msg)).is_err() { - break - } - } - } - } - }); - - async move { - if let Some(id) = id { - let follow_event = - FollowEvent::Initialized(Initialized::> { - finalized_block_hashes: vec![random_hash()], - finalized_block_runtime: Some(chain_head::RuntimeEvent::Valid( - RuntimeVersionEvent { - spec: runtime_spec(), - }, - )), - }); - - let res = AndThen( - // First send an initialized event with new ID - (vec![Json(follow_event)], subscription_id(id)), - // Next, send any events provided via the recv channel - rx, - ); - - Ok(res) - } else { - // Ran out of subscription IDs; return an error. 
- Err(Error::User(UserError::method_not_found())) - } - } - }, - ) - } - - fn subscription_id(id: usize) -> String { - format!("chainHeadFollowSubscriptionId{id}") - } - - fn response_started(id: &str) -> MethodResponse { - MethodResponse::Started(MethodResponseStarted { - operation_id: id.to_owned(), - discarded_items: None, - }) - } - - fn operation_error(id: &str) -> FollowEvent { - FollowEvent::OperationError(OperationError { - operation_id: id.to_owned(), - error: "error".to_owned(), - }) - } - - fn limit_reached() -> MethodResponse { - MethodResponse::LimitReached - } - - fn storage_done(id: &str) -> FollowEvent { - FollowEvent::OperationStorageDone(OperationId { - operation_id: id.to_owned(), - }) - } - fn storage_result(key: &str, value: &str) -> chain_head::StorageResult { - chain_head::StorageResult { - key: Bytes(key.to_owned().into()), - result: chain_head::StorageResultType::Value(Bytes(value.to_owned().into())), - } - } - fn storage_items(id: &str, items: &[chain_head::StorageResult]) -> FollowEvent { - FollowEvent::OperationStorageItems(OperationStorageItems { - operation_id: id.to_owned(), - items: VecDeque::from(items.to_owned()), - }) - } - - fn operation_continue(id: &str) -> FollowEvent { - FollowEvent::OperationWaitingForContinue(OperationId { - operation_id: id.to_owned(), - }) - } - - fn follow_event_stop() -> FollowEvent { - FollowEvent::Stop - } - - #[tokio::test] - async fn storage_fetch_values_returns_stream_with_single_error() { - let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); - - let rpc_client = mock_client_builder(rx) - .method_handler_once("chainHead_v1_storage", move |_params| { - tokio::spawn(async move { - // Wait a little and then send an error response on the - // chainHead_follow subscription: - tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; - tx.send(operation_error("Id1")).unwrap(); - }); - - async move { Json(response_started("Id1")) } - }) - .build(); - - let backend = 
build_backend_spawn_background(rpc_client); - - // Test - // This request should encounter an error. - let mut response = backend - .storage_fetch_values( - ["ID1".into(), "ID2".into(), "ID3".into()].into(), - random_hash(), - ) - .await - .unwrap(); - - assert!( - response - .next() - .await - .unwrap() - .is_err_and(|e| matches!(e, BackendError::Other(e) if e == "error")) - ); - assert!(response.next().await.is_none()); - } - - /// Tests that the method will retry on failed query - #[tokio::test] - async fn storage_fetch_values_retry_query() { - let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); - - let rpc_client = mock_client_builder(rx) - .method_handler_once("chainHead_v1_storage", async move |_params| { - // First call; return DisconnectedWillReconnect - Err::(disconnected_will_reconnect()) - }) - .method_handler_once("chainHead_v1_storage", async move |_params| { - // Otherwise, return that we'll start sending a response, and spawn - // task to send the relevant response via chainHead_follow. - tokio::spawn(async move { - tx.send(storage_items( - "Id1", - &[ - storage_result("ID1", "Data1"), - storage_result("ID2", "Data2"), - storage_result("ID3", "Data3"), - ], - )) - .unwrap(); - - tx.send(storage_done("Id1")).unwrap(); - }); - - Ok(Json(response_started("Id1"))) - }) - .build(); - - // Despite DisconnectedWillReconnect we try again transparently - // and get the data we asked for. 
- let backend = build_backend_spawn_background(rpc_client); - let response = backend - .storage_fetch_values( - ["ID1".into(), "ID2".into(), "ID3".into()].into(), - random_hash(), - ) - .await - .unwrap(); - - let response = response - .map(|x| x.unwrap()) - .collect::>() - .await; - - assert_eq!( - vec![ - storage_response("ID1", "Data1"), - storage_response("ID2", "Data2"), - storage_response("ID3", "Data3"), - ], - response - ) - } - - #[tokio::test] - async fn storage_fetch_values_retry_chainhead_continue() { - let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); - let tx2 = tx.clone(); - - let rpc_client = mock_client_builder(rx) - .method_handler_once("chainHead_v1_storage", async move |_params| { - // First call; return DisconnectedWillReconnect - Err::(disconnected_will_reconnect()) - }) - .method_handler_once("chainHead_v1_storage", async move |_params| { - // Next call, return a storage item and then a "waiting for continue". - tokio::spawn(async move { - tx.send(storage_items("Id1", &[storage_result("ID1", "Data1")])) - .unwrap(); - tx.send(operation_continue("Id1")).unwrap(); - }); - Ok(Json(response_started("Id1"))) - }) - .method_handler_once("chainHead_v1_continue", async move |_params| { - // First call; return DisconnectedWillReconnect - Err::(disconnected_will_reconnect()) - }) - .method_handler_once("chainHead_v1_continue", async move |_params| { - // Next call; acknowledge the "continue" and return remaining storage items. - tokio::spawn(async move { - tx2.send(storage_items("Id1", &[storage_result("ID2", "Data2")])) - .unwrap(); - tx2.send(storage_items("Id1", &[storage_result("ID3", "Data3")])) - .unwrap(); - tx2.send(storage_done("Id1")).unwrap(); - }); - Ok(Json(())) - }) - .build(); - - let backend = build_backend_spawn_background(rpc_client); - - // We should success, transparently handling `continue`s and `DisconnectWillReconnects`. 
- let response = backend - .storage_fetch_values( - ["ID1".into(), "ID2".into(), "ID3".into()].into(), - random_hash(), - ) - .await - .unwrap(); - - let response = response - .map(|x| x.unwrap()) - .collect::>() - .await; - - assert_eq!( - vec![ - storage_response("ID1", "Data1"), - storage_response("ID2", "Data2"), - storage_response("ID3", "Data3"), - ], - response - ) - } - - #[tokio::test] - async fn simple_fetch() { - let hash = random_hash(); - let (_tx, rx) = tokio::sync::mpsc::unbounded_channel(); - let rpc_client = mock_client_builder(rx) - .method_handler_once("chainSpec_v1_genesisHash", async move |_params| { - // First call, return disconnected error. - Err::(disconnected_will_reconnect()) - }) - .method_handler_once("chainSpec_v1_genesisHash", async move |_params| { - // Next call, return the hash. - Ok(Json(hash)) - }) - .build(); - - // Test - // This request should encounter an error on `request` and do a retry. - let backend = build_backend_spawn_background(rpc_client); - let response_hash = backend.genesis_hash().await.unwrap(); - - assert_eq!(hash, response_hash) - } - - // Check that the backend will resubscribe on Stop, and handle a change in subscription ID. - // see https://github.com/paritytech/subxt/issues/1567 - #[tokio::test] - async fn stale_subscription_id_failure() { - let (tx, rx) = tokio::sync::mpsc::unbounded_channel(); - let rpc_client = mock_client_builder_with_ids(rx, [1, 2]) - .method_handler("chainHead_v1_storage", move |params| { - // Decode the follow subscription ID which is the first param. - let this_sub_id = { - let params = params.as_ref().map(|p| p.get()); - let rpc_params = jsonrpsee::types::Params::new(params); - rpc_params.sequence().next::().unwrap() - }; - - // While it's equal to `subscription_id(1)`, it means we are seeing the first - // chainHead_follow subscription ID. error until we see an updated ID. 
- let is_wrong_sub_id = this_sub_id == subscription_id(1); - - async move { - if is_wrong_sub_id { - Json(limit_reached()) - } else { - Json(response_started("some_id")) - } - } - }) - .build(); - - let (backend, mut driver): (ChainHeadBackend, _) = build_backend(rpc_client); - - // Send a "FollowEvent::Stop" via chainhead_follow, and advance the driver just enough - // that this message has been processed. - tx.send(follow_event_stop()).unwrap(); - let _ = driver.next().await.unwrap(); - - // If we make a storage call at this point, we'll still be passing the "old" subscription - // ID, because the driver hasn't advanced enough to start a new chainhead_follow subscription, - // and will therefore fail with a "limit reached" response (to emulate what would happen if - // the chainHead_v1_storage call was made with the wrong subscription ID). - let response = backend - .storage_fetch_values(["ID1".into()].into(), random_hash()) - .await; - assert!(matches!(response, Err(e) if e.is_rpc_limit_reached())); - - // Advance the driver until a new chainHead_follow subscription has been started up. - let _ = driver.next().await.unwrap(); - let _ = driver.next().await.unwrap(); - let _ = driver.next().await.unwrap(); - - // Now, the ChainHeadBackend will use a new subscription ID and work. (If the driver - // advanced in the background automatically, this would happen automatically for us). - let response = backend - .storage_fetch_values(["ID1".into()].into(), random_hash()) - .await; - assert!(response.is_ok()); - } - } -} diff --git a/subxt/src/backend/utils.rs b/subxt/src/backend/utils.rs index 5ead7056f9..d687c734b8 100644 --- a/subxt/src/backend/utils.rs +++ b/subxt/src/backend/utils.rs @@ -1,89 +1,24 @@ -//! RPC utils. +//! Backend utils. use super::{StreamOf, StreamOfResults}; use crate::error::BackendError; -use futures::future::BoxFuture; use futures::{FutureExt, Stream, StreamExt}; use std::{future::Future, pin::Pin, task::Poll}; -/// Resubscribe callback. 
-type ResubscribeGetter = Box ResubscribeFuture + Send>; - -/// Future that resolves to a subscription stream. -type ResubscribeFuture = - Pin, BackendError>> + Send>>; - -pub(crate) enum PendingOrStream { - Pending(BoxFuture<'static, Result, BackendError>>), - Stream(StreamOfResults), -} - -impl std::fmt::Debug for PendingOrStream { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - PendingOrStream::Pending(_) => write!(f, "Pending"), - PendingOrStream::Stream(_) => write!(f, "Stream"), - } - } -} - -/// Retry subscription. -struct RetrySubscription { - resubscribe: ResubscribeGetter, - state: Option>, -} - -impl std::marker::Unpin for RetrySubscription {} - -impl Stream for RetrySubscription { - type Item = Result; - - fn poll_next( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> Poll> { - loop { - let Some(mut this) = self.state.take() else { - return Poll::Ready(None); - }; - - match this { - PendingOrStream::Stream(ref mut s) => match s.poll_next_unpin(cx) { - Poll::Ready(Some(Err(err))) => { - if err.is_disconnected_will_reconnect() { - self.state = Some(PendingOrStream::Pending((self.resubscribe)())); - } - return Poll::Ready(Some(Err(err))); - } - Poll::Ready(None) => return Poll::Ready(None), - Poll::Ready(Some(Ok(val))) => { - self.state = Some(this); - return Poll::Ready(Some(Ok(val))); - } - Poll::Pending => { - self.state = Some(this); - return Poll::Pending; - } - }, - PendingOrStream::Pending(mut fut) => match fut.poll_unpin(cx) { - Poll::Ready(Ok(stream)) => { - self.state = Some(PendingOrStream::Stream(stream)); - continue; - } - Poll::Ready(Err(err)) => { - if err.is_disconnected_will_reconnect() { - self.state = Some(PendingOrStream::Pending((self.resubscribe)())); - } - return Poll::Ready(Some(Err(err))); - } - Poll::Pending => { - self.state = Some(PendingOrStream::Pending(fut)); - return Poll::Pending; - } - }, - }; - } - } +/// Spawn a task. 
+/// +/// - On non-wasm targets, this will spawn a task via [`tokio::spawn`]. +/// - On wasm targets, this will spawn a task via [`wasm_bindgen_futures::spawn_local`]. +#[cfg(feature = "runtime")] +pub(crate) fn spawn(future: F) { + #[cfg(not(target_family = "wasm"))] + tokio::spawn(async move { + future.await; + }); + #[cfg(all(target_arch = "wasm32", target_os = "unknown"))] + wasm_bindgen_futures::spawn_local(async move { + future.await; + }); } /// Retry a future until it doesn't return a disconnected error. @@ -164,25 +99,95 @@ where /// }).await; /// } /// ``` -pub async fn retry_stream(sub_stream: F) -> Result, BackendError> +pub async fn retry_stream(get_stream: F) -> Result, BackendError> where - F: FnMut() -> ResubscribeFuture + Send + 'static + Clone, + F: Clone + Send + 'static + FnMut() -> Fut, + Fut: Future, BackendError>> + Send, R: Send + 'static, { - let stream = retry(sub_stream.clone()).await?; - - let resubscribe = Box::new(move || { - let sub_stream = sub_stream.clone(); - async move { retry(sub_stream).await }.boxed() - }); + // This returns the stream. On disconnect this is called again. + let get_stream_with_retry = move || { + let get_stream = get_stream.clone(); + async move { retry(get_stream).await }.boxed() + }; // The extra Box is to encapsulate the retry subscription type Ok(StreamOf::new(Box::pin(RetrySubscription { - state: Some(PendingOrStream::Stream(stream)), - resubscribe, + state: RetrySubscriptionState::Init, + resubscribe: get_stream_with_retry, }))) } +/// Retry subscription. 
+struct RetrySubscription { + resubscribe: F, + state: RetrySubscriptionState, +} + +enum RetrySubscriptionState { + Init, + Pending(R), + Stream(StreamOfResults), + Done, +} + +impl std::marker::Unpin for RetrySubscription {} + +impl Stream for RetrySubscription +where + F: FnMut() -> R, + R: Future, BackendError>> + Unpin, +{ + type Item = Result; + + fn poll_next( + mut self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> Poll> { + loop { + match &mut self.state { + RetrySubscriptionState::Init => { + self.state = RetrySubscriptionState::Pending((self.resubscribe)()); + } + RetrySubscriptionState::Stream(s) => match s.poll_next_unpin(cx) { + Poll::Ready(Some(Err(err))) => { + if err.is_disconnected_will_reconnect() { + self.state = RetrySubscriptionState::Init; + } + return Poll::Ready(Some(Err(err))); + } + Poll::Ready(None) => { + self.state = RetrySubscriptionState::Done; + return Poll::Ready(None); + } + Poll::Ready(Some(Ok(val))) => { + return Poll::Ready(Some(Ok(val))); + } + Poll::Pending => { + return Poll::Pending; + } + }, + RetrySubscriptionState::Pending(fut) => match fut.poll_unpin(cx) { + Poll::Ready(Err(err)) => { + if err.is_disconnected_will_reconnect() { + self.state = RetrySubscriptionState::Init; + } + return Poll::Ready(Some(Err(err))); + } + Poll::Ready(Ok(stream)) => { + self.state = RetrySubscriptionState::Stream(stream); + continue; + } + Poll::Pending => { + return Poll::Pending; + } + }, + RetrySubscriptionState::Done => return Poll::Ready(None), + }; + } + } +} + #[cfg(test)] mod tests { use super::*; @@ -193,7 +198,7 @@ mod tests { } fn custom_err() -> BackendError { - BackendError::Other(String::new()) + BackendError::other("") } #[tokio::test] @@ -233,7 +238,7 @@ mod tests { }); let retry_stream = RetrySubscription { - state: Some(PendingOrStream::Stream(StreamOf::new(Box::pin(stream)))), + state: RetrySubscriptionState::Stream(StreamOf::new(Box::pin(stream))), resubscribe, }; @@ -250,7 +255,7 @@ mod tests { let 
resubscribe = Box::new(|| async move { Err(custom_err()) }.boxed()); let retry_stream = RetrySubscription { - state: Some(PendingOrStream::Stream(StreamOf::new(Box::pin(stream)))), + state: RetrySubscriptionState::Stream(StreamOf::new(Box::pin(stream))), resubscribe, }; @@ -263,7 +268,7 @@ mod tests { let resubscribe = Box::new(|| async move { Err(custom_err()) }.boxed()); let retry_stream = RetrySubscription { - state: Some(PendingOrStream::Stream(StreamOf::new(Box::pin(stream)))), + state: RetrySubscriptionState::Stream(StreamOf::new(Box::pin(stream))), resubscribe, }; diff --git a/subxt/src/blocks/block_types.rs b/subxt/src/blocks/block_types.rs deleted file mode 100644 index cac88b254a..0000000000 --- a/subxt/src/blocks/block_types.rs +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use crate::{ - backend::BlockRef, - blocks::Extrinsics, - client::{OfflineClientT, OnlineClientT}, - config::{Config, HashFor, Header}, - error::{AccountNonceError, BlockError, EventsError, ExtrinsicError}, - events, - runtime_api::RuntimeApi, - storage::StorageClientAt, -}; - -use codec::{Decode, Encode}; -use futures::lock::Mutex as AsyncMutex; -use std::sync::Arc; - -/// A representation of a block. -pub struct Block { - header: T::Header, - block_ref: BlockRef>, - client: C, - // Since we obtain the same events for every extrinsic, let's - // cache them so that we only ever do that once: - cached_events: CachedEvents, -} - -impl Clone for Block { - fn clone(&self) -> Self { - Self { - header: self.header.clone(), - block_ref: self.block_ref.clone(), - client: self.client.clone(), - cached_events: self.cached_events.clone(), - } - } -} - -// A cache for our events so we don't fetch them more than once when -// iterating over events for extrinsics. 
-pub(crate) type CachedEvents = Arc>>>; - -impl Block -where - T: Config, - C: OfflineClientT, -{ - pub(crate) fn new(header: T::Header, block_ref: BlockRef>, client: C) -> Self { - Block { - header, - block_ref, - client, - cached_events: Default::default(), - } - } - - /// Return a reference to the given block. While this reference is kept alive, - /// the backend will (if possible) endeavour to keep hold of the block. - pub fn reference(&self) -> BlockRef> { - self.block_ref.clone() - } - - /// Return the block hash. - pub fn hash(&self) -> HashFor { - self.block_ref.hash() - } - - /// Return the block number. - pub fn number(&self) -> ::Number { - self.header().number() - } - - /// Return the entire block header. - pub fn header(&self) -> &T::Header { - &self.header - } -} - -impl Block -where - T: Config, - C: OnlineClientT, -{ - /// Return the events associated with the block, fetching them from the node if necessary. - pub async fn events(&self) -> Result, EventsError> { - get_events(&self.client, self.hash(), &self.cached_events).await - } - - /// Fetch and return the extrinsics in the block body. - pub async fn extrinsics(&self) -> Result, ExtrinsicError> { - let block_hash = self.hash(); - - let extrinsics = self - .client - .backend() - .block_body(block_hash) - .await - .map_err(ExtrinsicError::CannotGetBlockBody)? - .ok_or_else(|| ExtrinsicError::BlockNotFound(block_hash.into()))?; - - let extrinsics = Extrinsics::new( - self.client.clone(), - extrinsics, - self.cached_events.clone(), - block_hash, - )?; - - Ok(extrinsics) - } - - /// Work with storage. - pub fn storage(&self) -> StorageClientAt { - StorageClientAt::new(self.client.clone(), self.block_ref.clone()) - } - - /// Execute a runtime API call at this block. - pub async fn runtime_api(&self) -> RuntimeApi { - RuntimeApi::new(self.client.clone(), self.block_ref.clone()) - } - - /// Get the account nonce for a given account ID at this block. 
- pub async fn account_nonce(&self, account_id: &T::AccountId) -> Result { - get_account_nonce(&self.client, account_id, self.hash()) - .await - .map_err(|e| BlockError::AccountNonceError { - block_hash: self.hash().into(), - account_id: account_id.encode().into(), - reason: e, - }) - } -} - -// Return Events from the cache, or fetch from the node if needed. -pub(crate) async fn get_events( - client: &C, - block_hash: HashFor, - cached_events: &AsyncMutex>>, -) -> Result, EventsError> -where - T: Config, - C: OnlineClientT, -{ - // Acquire lock on the events cache. We either get back our events or we fetch and set them - // before unlocking, so only one fetch call should ever be made. We do this because the - // same events can be shared across all extrinsics in the block. - let mut lock = cached_events.lock().await; - let events = match &*lock { - Some(events) => events.clone(), - None => { - let events = events::EventsClient::new(client.clone()) - .at(block_hash) - .await?; - lock.replace(events.clone()); - events - } - }; - - Ok(events) -} - -// Return the account nonce at some block hash for an account ID. -pub(crate) async fn get_account_nonce( - client: &C, - account_id: &T::AccountId, - block_hash: HashFor, -) -> Result -where - C: OnlineClientT, - T: Config, -{ - let account_nonce_bytes = client - .backend() - .call( - "AccountNonceApi_account_nonce", - Some(&account_id.encode()), - block_hash, - ) - .await?; - - // custom decoding from a u16/u32/u64 into a u64, based on the number of bytes we got back. 
- let cursor = &mut &account_nonce_bytes[..]; - let account_nonce: u64 = match account_nonce_bytes.len() { - 2 => u16::decode(cursor)?.into(), - 4 => u32::decode(cursor)?.into(), - 8 => u64::decode(cursor)?, - _ => { - return Err(AccountNonceError::WrongNumberOfBytes( - account_nonce_bytes.len(), - )); - } - }; - Ok(account_nonce) -} diff --git a/subxt/src/blocks/blocks_client.rs b/subxt/src/blocks/blocks_client.rs deleted file mode 100644 index 87e5f556b5..0000000000 --- a/subxt/src/blocks/blocks_client.rs +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use super::Block; -use crate::{ - backend::{BlockRef, StreamOfResults}, - client::OnlineClientT, - config::{Config, HashFor}, - error::BlockError, - utils::PhantomDataSendSync, -}; -use derive_where::derive_where; -use futures::StreamExt; -use std::future::Future; - -type BlockStream = StreamOfResults; -type BlockStreamRes = Result, BlockError>; - -/// A client for working with blocks. -#[derive_where(Clone; Client)] -pub struct BlocksClient { - client: Client, - _marker: PhantomDataSendSync, -} - -impl BlocksClient { - /// Create a new [`BlocksClient`]. - pub fn new(client: Client) -> Self { - Self { - client, - _marker: PhantomDataSendSync::new(), - } - } -} - -impl BlocksClient -where - T: Config, - Client: OnlineClientT, -{ - /// Obtain block details given the provided block hash. - /// - /// # Warning - /// - /// This call only supports blocks produced since the most recent - /// runtime upgrade. You can attempt to retrieve older blocks, - /// but may run into errors attempting to work with them. - pub fn at( - &self, - block_ref: impl Into>>, - ) -> impl Future, BlockError>> + Send + 'static { - self.at_or_latest(Some(block_ref.into())) - } - - /// Obtain block details of the latest finalized block. 
- pub fn at_latest( - &self, - ) -> impl Future, BlockError>> + Send + 'static { - self.at_or_latest(None) - } - - /// Obtain block details given the provided block hash, or the latest block if `None` is - /// provided. - fn at_or_latest( - &self, - block_ref: Option>>, - ) -> impl Future, BlockError>> + Send + 'static { - let client = self.client.clone(); - async move { - // If a block ref isn't provided, we'll get the latest finalized ref to use. - let block_ref = match block_ref { - Some(r) => r, - None => client - .backend() - .latest_finalized_block_ref() - .await - .map_err(BlockError::CouldNotGetLatestBlock)?, - }; - - let maybe_block_header = client - .backend() - .block_header(block_ref.hash()) - .await - .map_err(|e| BlockError::CouldNotGetBlockHeader { - block_hash: block_ref.hash().into(), - reason: e, - })?; - - let block_header = match maybe_block_header { - Some(header) => header, - None => { - return Err(BlockError::BlockNotFound { - block_hash: block_ref.hash().into(), - }); - } - }; - - Ok(Block::new(block_header, block_ref, client)) - } - } - - /// Subscribe to all new blocks imported by the node. - /// - /// **Note:** You probably want to use [`Self::subscribe_finalized()`] most of - /// the time. - pub fn subscribe_all( - &self, - ) -> impl Future>, BlockError>> + Send + 'static - where - Client: Send + Sync + 'static, - { - let client = self.client.clone(); - let hasher = client.hasher(); - header_sub_fut_to_block_sub(self.clone(), async move { - let stream = client - .backend() - .stream_all_block_headers(hasher) - .await - .map_err(BlockError::CouldNotSubscribeToAllBlocks)?; - BlockStreamRes::Ok(stream) - }) - } - - /// Subscribe to all new blocks imported by the node onto the current best fork. - /// - /// **Note:** You probably want to use [`Self::subscribe_finalized()`] most of - /// the time. 
- pub fn subscribe_best( - &self, - ) -> impl Future>, BlockError>> + Send + 'static - where - Client: Send + Sync + 'static, - { - let client = self.client.clone(); - let hasher = client.hasher(); - header_sub_fut_to_block_sub(self.clone(), async move { - let stream = client - .backend() - .stream_best_block_headers(hasher) - .await - .map_err(BlockError::CouldNotSubscribeToBestBlocks)?; - BlockStreamRes::Ok(stream) - }) - } - - /// Subscribe to finalized blocks. - pub fn subscribe_finalized( - &self, - ) -> impl Future>, BlockError>> + Send + 'static - where - Client: Send + Sync + 'static, - { - let client = self.client.clone(); - let hasher = client.hasher(); - header_sub_fut_to_block_sub(self.clone(), async move { - let stream = client - .backend() - .stream_finalized_block_headers(hasher) - .await - .map_err(BlockError::CouldNotSubscribeToFinalizedBlocks)?; - BlockStreamRes::Ok(stream) - }) - } -} - -/// Take a promise that will return a subscription to some block headers, -/// and return a subscription to some blocks based on this. -async fn header_sub_fut_to_block_sub( - blocks_client: BlocksClient, - sub: S, -) -> Result>, BlockError> -where - T: Config, - S: Future>)>, BlockError>> - + Send - + 'static, - Client: OnlineClientT + Send + Sync + 'static, -{ - let sub = sub.await?.then(move |header_and_ref| { - let client = blocks_client.client.clone(); - async move { - let (header, block_ref) = match header_and_ref { - Ok(header_and_ref) => header_and_ref, - Err(e) => return Err(e), - }; - - Ok(Block::new(header, block_ref, client)) - } - }); - BlockStreamRes::Ok(StreamOfResults::new(Box::pin(sub))) -} diff --git a/subxt/src/blocks/extrinsic_types.rs b/subxt/src/blocks/extrinsic_types.rs deleted file mode 100644 index 3be678ed58..0000000000 --- a/subxt/src/blocks/extrinsic_types.rs +++ /dev/null @@ -1,350 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. 
- -use crate::{ - blocks::block_types::{CachedEvents, get_events}, - client::{OfflineClientT, OnlineClientT}, - config::{Config, HashFor}, - error::{EventsError, ExtrinsicDecodeErrorAt, ExtrinsicError}, - events, -}; -use derive_where::derive_where; -use scale_decode::{DecodeAsFields, DecodeAsType}; -use subxt_core::blocks::{ExtrinsicDetails as CoreExtrinsicDetails, Extrinsics as CoreExtrinsics}; - -// Re-export anything that's directly returned/used in the APIs below. -pub use subxt_core::blocks::{ - ExtrinsicTransactionExtension, ExtrinsicTransactionExtensions, StaticExtrinsic, -}; - -/// The body of a block. -pub struct Extrinsics { - inner: CoreExtrinsics, - client: C, - cached_events: CachedEvents, - hash: HashFor, -} - -impl Extrinsics -where - T: Config, - C: OfflineClientT, -{ - pub(crate) fn new( - client: C, - extrinsics: Vec>, - cached_events: CachedEvents, - hash: HashFor, - ) -> Result { - let inner = CoreExtrinsics::decode_from(extrinsics, client.metadata())?; - Ok(Self { - inner, - client, - cached_events, - hash, - }) - } - - /// See [`subxt_core::blocks::Extrinsics::len()`]. - pub fn len(&self) -> usize { - self.inner.len() - } - - /// See [`subxt_core::blocks::Extrinsics::is_empty()`]. - pub fn is_empty(&self) -> bool { - self.inner.is_empty() - } - - /// Return the block hash that these extrinsics are from. - pub fn block_hash(&self) -> HashFor { - self.hash - } - - /// Returns an iterator over the extrinsics in the block body. - // Dev note: The returned iterator is 'static + Send so that we can box it up and make - // use of it with our `FilterExtrinsic` stuff. 
- pub fn iter(&self) -> impl Iterator> + Send + Sync + 'static { - let client = self.client.clone(); - let cached_events = self.cached_events.clone(); - let block_hash = self.hash; - - self.inner.iter().map(move |inner| { - ExtrinsicDetails::new(inner, client.clone(), block_hash, cached_events.clone()) - }) - } - - /// Iterate through the extrinsics using metadata to dynamically decode and skip - /// them, and return only those which should decode to the provided `E` type. - /// If an error occurs, all subsequent iterations return `None`. - pub fn find( - &self, - ) -> impl Iterator, ExtrinsicError>> { - self.inner.find::().map(|res| { - match res { - Err(e) => Err(ExtrinsicError::from(e)), - Ok(ext) => { - // Wrap details from subxt-core into what we want here: - let details = ExtrinsicDetails::new( - ext.details, - self.client.clone(), - self.hash, - self.cached_events.clone(), - ); - - Ok(FoundExtrinsic { - details, - value: ext.value, - }) - } - } - }) - } - - /// Iterate through the extrinsics using metadata to dynamically decode and skip - /// them, and return the first extrinsic found which decodes to the provided `E` type. - pub fn find_first( - &self, - ) -> Result>, ExtrinsicError> { - self.find::().next().transpose() - } - - /// Iterate through the extrinsics using metadata to dynamically decode and skip - /// them, and return the last extrinsic found which decodes to the provided `Ev` type. - pub fn find_last( - &self, - ) -> Result>, ExtrinsicError> { - self.find::().last().transpose() - } - - /// Find an extrinsics that decodes to the type provided. Returns true if it was found. - pub fn has(&self) -> Result { - Ok(self.find::().next().transpose()?.is_some()) - } -} - -/// A single extrinsic in a block. -pub struct ExtrinsicDetails { - inner: CoreExtrinsicDetails, - /// The block hash of this extrinsic (needed to fetch events). - block_hash: HashFor, - /// Subxt client. - client: C, - /// Cached events. 
- cached_events: CachedEvents, -} - -impl ExtrinsicDetails -where - T: Config, - C: OfflineClientT, -{ - // Attempt to dynamically decode a single extrinsic from the given input. - pub(crate) fn new( - inner: CoreExtrinsicDetails, - client: C, - block_hash: HashFor, - cached_events: CachedEvents, - ) -> ExtrinsicDetails { - ExtrinsicDetails { - inner, - client, - block_hash, - cached_events, - } - } - - /// See [`subxt_core::blocks::ExtrinsicDetails::hash()`]. - pub fn hash(&self) -> HashFor { - self.inner.hash() - } - - /// See [`subxt_core::blocks::ExtrinsicDetails::is_signed()`]. - pub fn is_signed(&self) -> bool { - self.inner.is_signed() - } - - /// See [`subxt_core::blocks::ExtrinsicDetails::index()`]. - pub fn index(&self) -> u32 { - self.inner.index() - } - - /// See [`subxt_core::blocks::ExtrinsicDetails::bytes()`]. - pub fn bytes(&self) -> &[u8] { - self.inner.bytes() - } - - /// See [`subxt_core::blocks::ExtrinsicDetails::call_bytes()`]. - pub fn call_bytes(&self) -> &[u8] { - self.inner.call_bytes() - } - - /// See [`subxt_core::blocks::ExtrinsicDetails::field_bytes()`]. - pub fn field_bytes(&self) -> &[u8] { - self.inner.field_bytes() - } - - /// See [`subxt_core::blocks::ExtrinsicDetails::address_bytes()`]. - pub fn address_bytes(&self) -> Option<&[u8]> { - self.inner.address_bytes() - } - - /// See [`subxt_core::blocks::ExtrinsicDetails::signature_bytes()`]. - pub fn signature_bytes(&self) -> Option<&[u8]> { - self.inner.signature_bytes() - } - - /// See [`subxt_core::blocks::ExtrinsicDetails::transaction_extensions_bytes()`]. - pub fn transaction_extensions_bytes(&self) -> Option<&[u8]> { - self.inner.transaction_extensions_bytes() - } - - /// See [`subxt_core::blocks::ExtrinsicDetails::transaction_extensions()`]. - pub fn transaction_extensions(&self) -> Option> { - self.inner.transaction_extensions() - } - - /// See [`subxt_core::blocks::ExtrinsicDetails::pallet_index()`]. 
- pub fn pallet_index(&self) -> u8 { - self.inner.pallet_index() - } - - /// See [`subxt_core::blocks::ExtrinsicDetails::call_index()`]. - pub fn call_index(&self) -> u8 { - self.inner.call_index() - } - - /// See [`subxt_core::blocks::ExtrinsicDetails::pallet_name()`]. - pub fn pallet_name(&self) -> &str { - self.inner.pallet_name() - } - - /// See [`subxt_core::blocks::ExtrinsicDetails::call_name()`]. - pub fn call_name(&self) -> &str { - self.inner.call_name() - } - - /// See [`subxt_core::blocks::ExtrinsicDetails::decode_as_fields()`]. - pub fn decode_as_fields(&self) -> Result { - self.inner.decode_as_fields().map_err(Into::into) - } - - /// See [`subxt_core::blocks::ExtrinsicDetails::as_extrinsic()`]. - pub fn as_extrinsic(&self) -> Result, ExtrinsicError> { - self.inner.as_extrinsic::().map_err(Into::into) - } - - /// See [`subxt_core::blocks::ExtrinsicDetails::as_root_extrinsic()`]. - pub fn as_root_extrinsic(&self) -> Result { - self.inner.as_root_extrinsic::().map_err(Into::into) - } -} - -impl ExtrinsicDetails -where - T: Config, - C: OnlineClientT, -{ - /// The events associated with the extrinsic. - pub async fn events(&self) -> Result, EventsError> { - let events = get_events(&self.client, self.block_hash, &self.cached_events).await?; - let ext_hash = self.inner.hash(); - Ok(ExtrinsicEvents::new(ext_hash, self.index(), events)) - } -} - -/// A Static Extrinsic found in a block coupled with it's details. -pub struct FoundExtrinsic { - /// Details for the extrinsic. - pub details: ExtrinsicDetails, - /// The decoded extrinsic value. - pub value: E, -} - -/// The events associated with a given extrinsic. -#[derive_where(Debug)] -pub struct ExtrinsicEvents { - // The hash of the extrinsic (handy to expose here because - // this type is returned from TxProgress things in the most - // basic flows, so it's the only place people can access it - // without complicating things for themselves). 
- ext_hash: HashFor, - // The index of the extrinsic: - idx: u32, - // All of the events in the block: - events: events::Events, -} - -impl ExtrinsicEvents { - /// Creates a new instance of `ExtrinsicEvents`. - #[doc(hidden)] - pub fn new(ext_hash: HashFor, idx: u32, events: events::Events) -> Self { - Self { - ext_hash, - idx, - events, - } - } - - /// The index of the extrinsic that these events are produced from. - pub fn extrinsic_index(&self) -> u32 { - self.idx - } - - /// Return the hash of the extrinsic. - pub fn extrinsic_hash(&self) -> HashFor { - self.ext_hash - } - - /// Return all of the events in the block that the extrinsic is in. - pub fn all_events_in_block(&self) -> &events::Events { - &self.events - } - - /// Iterate over all of the raw events associated with this transaction. - /// - /// This works in the same way that [`events::Events::iter()`] does, with the - /// exception that it filters out events not related to the submitted extrinsic. - pub fn iter(&self) -> impl Iterator, EventsError>> { - self.events.iter().filter(|ev| { - ev.as_ref() - .map(|ev| ev.phase() == events::Phase::ApplyExtrinsic(self.idx)) - .unwrap_or(true) // Keep any errors. - }) - } - - /// Find all of the transaction events matching the event type provided as a generic parameter. - /// - /// This works in the same way that [`events::Events::find()`] does, with the - /// exception that it filters out events not related to the submitted extrinsic. - pub fn find(&self) -> impl Iterator> { - self.iter() - .filter_map(|ev| ev.and_then(|ev| ev.as_event::()).transpose()) - } - - /// Iterate through the transaction events using metadata to dynamically decode and skip - /// them, and return the first event found which decodes to the provided `Ev` type. - /// - /// This works in the same way that [`events::Events::find_first()`] does, with the - /// exception that it ignores events not related to the submitted extrinsic. 
- pub fn find_first(&self) -> Result, EventsError> { - self.find::().next().transpose() - } - - /// Iterate through the transaction events using metadata to dynamically decode and skip - /// them, and return the last event found which decodes to the provided `Ev` type. - /// - /// This works in the same way that [`events::Events::find_last()`] does, with the - /// exception that it ignores events not related to the submitted extrinsic. - pub fn find_last(&self) -> Result, EventsError> { - self.find::().last().transpose() - } - - /// Find an event in those associated with this transaction. Returns true if it was found. - /// - /// This works in the same way that [`events::Events::has()`] does, with the - /// exception that it ignores events not related to the submitted extrinsic. - pub fn has(&self) -> Result { - Ok(self.find::().next().transpose()?.is_some()) - } -} diff --git a/subxt/src/blocks/mod.rs b/subxt/src/blocks/mod.rs deleted file mode 100644 index a28b2a5919..0000000000 --- a/subxt/src/blocks/mod.rs +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! This module exposes the necessary functionality for working with events. - -mod block_types; -mod blocks_client; -mod extrinsic_types; - -/// A reference to a block. 
-pub use crate::backend::BlockRef; - -pub use block_types::Block; -pub use blocks_client::BlocksClient; -pub use extrinsic_types::{ - ExtrinsicDetails, ExtrinsicEvents, ExtrinsicTransactionExtension, - ExtrinsicTransactionExtensions, Extrinsics, FoundExtrinsic, StaticExtrinsic, -}; - -// We get account nonce info in tx_client, too, so re-use the logic: -pub(crate) use block_types::get_account_nonce; diff --git a/subxt/src/book/mod.rs b/subxt/src/book/mod.rs deleted file mode 100644 index 60052310ef..0000000000 --- a/subxt/src/book/mod.rs +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -// Dev note; I used the following command to normalize and wrap comments: -// rustfmt +nightly --config wrap_comments=true,comment_width=100,normalize_comments=true subxt/src/book/custom_values -// It messed up comments in code blocks though, so be prepared to go and fix those. - -//! # The Subxt Guide -//! -//! Subxt is a library for interacting with Substrate based nodes. It has a focus on **sub**mitting -//! e**xt**rinsics, hence the name, however it's also capable of reading blocks, storage, events and -//! constants from a node. The aim of this guide is to explain key concepts and get you started with -//! using Subxt. -//! -//! 1. [Features](#features-at-a-glance) -//! 2. [Limitations](#limitations) -//! 3. [Quick start](#quick-start) -//! 4. [Usage](#usage) -//! -//! ## Features at a glance -//! -//! Here's a quick overview of the features that Subxt has to offer: -//! -//! - Subxt allows you to generate a static, type safe interface to a node given some metadata; this -//! allows you to catch many errors at compile time rather than runtime. -//! - Subxt also makes heavy use of node metadata to encode/decode the data sent to/from it. This -//! allows it to target almost any node which can output the correct metadata, and allows it some -//! 
flexibility in encoding and decoding things to account for cross-node differences. -//! - Subxt has a pallet-oriented interface, meaning that code you write to talk to some pallet on -//! one node will often "Just Work" when pointed at different nodes that use the same pallet. -//! - Subxt can work offline; you can generate and sign transactions, access constants from node -//! metadata and more, without a network connection. This is all checked at compile time, so you -//! can be certain it won't try to establish a network connection if you don't want it to. -//! - Subxt can forego the statically generated interface and build transactions, storage queries -//! and constant queries using data provided at runtime, rather than queries constructed -//! statically. -//! - Subxt can be compiled to WASM to run in the browser, allowing it to back Rust based browser -//! apps, or even bind to JS apps. -//! -//! ## Limitations -//! -//! In various places, you can provide a block hash to access data at a particular block, for -//! instance: -//! -//! - [`crate::storage::StorageClient::at`] -//! - [`crate::events::EventsClient::at`] -//! - [`crate::blocks::BlocksClient::at`] -//! - [`crate::runtime_api::RuntimeApiClient::at`] -//! -//! However, Subxt is (by default) only capable of properly working with blocks that were produced -//! after the most recent runtime update. This is because it uses the most recent metadata given -//! back by a node to encode and decode things. It's possible to decode older blocks produced by a -//! runtime that emits compatible (currently, V14) metadata by manually setting the metadata used by -//! the client using [`crate::client::OnlineClient::set_metadata()`]. -//! -//! Subxt does not support working with blocks produced prior to the runtime update that introduces -//! V14 metadata. It may have some success decoding older blocks using newer metadata, but may also -//! completely fail to do so. -//! -//! ## Quick start -//! -//! 
Here is a simple but complete example of using Subxt to transfer some tokens from the example -//! accounts, Alice to Bob: -//! -//! ```rust,ignore -#![doc = include_str!("../../examples/tx_basic.rs")] -//! ``` -//! -//! This example assumes that a Polkadot node is running locally (Subxt endeavors to support all -//! recent releases). Typically, to use Subxt to talk to some custom Substrate node (for example a -//! parachain node), you'll want to: -//! -//! 1. [Generate an interface](setup::codegen) -//! 2. [Create a config](setup::config) -//! 3. [Use the config to instantiate the client](setup::client) -//! -//! Follow the above links to learn more about each step. -//! -//! ## Usage -//! -//! Once Subxt is configured, the next step is interacting with a node. Follow the links -//! below to learn more about how to use Subxt for each of the following things: -//! -//! - [Transactions](usage::transactions): Subxt can build and submit transactions, wait until they are in -//! blocks, and retrieve the associated events. -//! - [Storage](usage::storage): Subxt can query the node storage. -//! - [Events](usage::events): Subxt can read the events emitted for recent blocks. -//! - [Constants](usage::constants): Subxt can access the constant values stored in a node, which -//! remain the same for a given runtime version. -//! - [Blocks](usage::blocks): Subxt can load recent blocks or subscribe to new/finalized blocks, -//! reading the extrinsics, events and storage at these blocks. -//! - [Runtime APIs](usage::runtime_apis): Subxt can make calls into pallet runtime APIs to retrieve -//! data. -//! - [Custom values](usage::custom_values): Subxt can access "custom values" stored in the metadata. -//! - [Raw RPC calls](usage::rpc): Subxt can be used to make raw RPC requests to compatible nodes. -//! -//! ## Examples -//! -//! Some complete, self contained examples which are not a part of this guide: -//! -//! 
- [`parachain-example`](https://github.com/paritytech/subxt/tree/master/examples/parachain-example) is an example -//! which uses Zombienet to spawn a parachain locally, and then connects to it using Subxt. -//! - [`wasm-example`](https://github.com/paritytech/subxt/tree/master/examples/wasm-example) is an example of writing -//! a Rust app that contains a Yew based UI, uses Subxt to interact with a chain, and compiles to WASM in order to -//! run entirely in the browser. -pub mod setup; -pub mod usage; diff --git a/subxt/src/book/setup/client.rs b/subxt/src/book/setup/client.rs deleted file mode 100644 index 52d6348e67..0000000000 --- a/subxt/src/book/setup/client.rs +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! # The Subxt client. -//! -//! The client forms the entry point to all of the Subxt APIs. Every client implements one or -//! both of [`crate::client::OfflineClientT`] and [`crate::client::OnlineClientT`]. -//! -//! Subxt ships with three clients which implement one or both of traits: -//! - An [online client](crate::client::OnlineClient). -//! - An [offline client](crate::client::OfflineClient). -//! - A light client (which is currently still unstable). -//! -//! In theory it's possible for users to implement their own clients, although this isn't generally -//! expected. -//! -//! The provided clients are all generic over the [`crate::config::Config`] that they accept, which -//! determines how they will interact with the chain. -//! -//! In the case of the [`crate::OnlineClient`], we have various ways to instantiate it: -//! -//! - [`crate::OnlineClient::new()`] to connect to a node running locally. This uses the default Subxt -//! backend, and the default RPC client. -//! - [`crate::OnlineClient::from_url()`] to connect to a node at a specific URL. This uses the default Subxt -//! 
backend, and the default RPC client. -//! - [`crate::OnlineClient::from_rpc_client()`] to instantiate the client with a [`crate::backend::rpc::RpcClient`]. -//! - [`crate::OnlineClient::from_backend()`] to instantiate Subxt using a custom backend. Currently there -//! is just one backend, [`crate::backend::legacy::LegacyBackend`]. This backend can be instantiated from -//! a [`crate::backend::rpc::RpcClient`]. -//! -//! [`crate::backend::rpc::RpcClient`] can itself be instantiated from anything that implements the low level -//! [`crate::backend::rpc::RpcClientT`] trait; this allows you to decide how Subxt will attempt to talk to a node -//! if you'd prefer something other default client. We use this approach under the hood to implement the light client. -//! -//! ## Examples -//! -//! Most of the other examples will instantiate a client. Here are a couple of examples for less common -//! cases. -//! -//! ### Writing a custom [`crate::backend::rpc::RpcClientT`] implementation: -//! -//! ```rust,ignore -#![doc = include_str!("../../../examples/setup_client_custom_rpc.rs")] -//! ``` -//! -//! ### Creating an [`crate::OfflineClient`]: -//! -//! ```rust,ignore -#![doc = include_str!("../../../examples/setup_client_offline.rs")] -//! ``` -//! diff --git a/subxt/src/book/setup/codegen.rs b/subxt/src/book/setup/codegen.rs deleted file mode 100644 index bd133b2c1b..0000000000 --- a/subxt/src/book/setup/codegen.rs +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! # Generating an interface -//! -//! The simplest way to use Subxt is to generate an interface to a chain that you'd like to interact -//! with. This generated interface allows you to build transactions and construct queries to access -//! data while leveraging the full type safety of the Rust compiler. -//! -//! ## The `#[subxt]` macro -//! -//! 
The most common way to generate the interface is to use the [`#[subxt]`](crate::subxt) macro. -//! Using this macro looks something like: -//! -//! ```rust,no_run,standalone_crate -//! #[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_tiny.scale")] -//! pub mod polkadot {} -//! ``` -//! -//! The macro takes a path to some node metadata, and uses that to generate the interface you'll use -//! to talk to it. [Go here](crate::subxt) to learn more about the options available to the macro. -//! -//! To obtain this metadata you'll need for the above, you can use the `subxt` CLI tool to download it -//! from a node. The tool can be installed via `cargo`: -//! -//! ```shell -//! cargo install subxt-cli -//! ``` -//! -//! And then it can be used to fetch metadata and save it to a file: -//! -//! ```shell -//! # Download and save all of the metadata: -//! subxt metadata > metadata.scale -//! # Download and save only the pallets you want to generate an interface for: -//! subxt metadata --pallets Balances,System > metadata.scale -//! ``` -//! -//! Explicitly specifying pallets will cause the tool to strip out all unnecessary metadata and type -//! information, making the bundle much smaller in the event that you only need to generate an -//! interface for a subset of the available pallets on the node. -//! -//! ## The CLI tool -//! -//! Using the [`#[subxt]`](crate::subxt) macro carries some downsides: -//! -//! - Using it to generate an interface will have a small impact on compile times (though much less of -//! one if you only need a few pallets). -//! - IDE support for autocompletion and documentation when using the macro interface can be poor. -//! - It's impossible to manually look at the generated code to understand and debug things. -//! -//! If these are an issue, you can manually generate the same code that the macro generates under the hood -//! by using the `subxt codegen` command: -//! -//! ```shell -//! 
# Install the CLI tool if you haven't already: -//! cargo install subxt-cli -//! # Generate and format rust code, saving it to `interface.rs`: -//! subxt codegen | rustfmt > interface.rs -//! ``` -//! -//! Use `subxt codegen --help` for more options; many of the options available via the macro are -//! also available via the CLI tool, such as the ability to substitute generated types for others, -//! or strip out docs from the generated code. -//! diff --git a/subxt/src/book/setup/config.rs b/subxt/src/book/setup/config.rs deleted file mode 100644 index 663a02aa24..0000000000 --- a/subxt/src/book/setup/config.rs +++ /dev/null @@ -1,166 +0,0 @@ -//! # Creating a Config -//! -//! Subxt requires you to provide a type implementing [`crate::config::Config`] in order to connect to a node. -//! The [`crate::config::Config`] trait for the most part mimics the `frame_system::Config` trait. -//! For most use cases, you can just use one of the following Configs shipped with Subxt: -//! -//! - [`PolkadotConfig`](crate::config::PolkadotConfig) for talking to Polkadot nodes, and -//! - [`SubstrateConfig`](crate::config::SubstrateConfig) for talking to generic nodes built with Substrate. -//! -//! # How to create a Config for a custom chain? -//! -//! Some chains may use config that is not compatible with our [`PolkadotConfig`](crate::config::PolkadotConfig) or -//! [`SubstrateConfig`](crate::config::SubstrateConfig). -//! -//! We now walk through creating a custom [`crate::config::Config`] for a parachain, using the -//! ["Statemint"](https://parachains.info/details/statemint) parachain, also known as "Asset Hub", as an example. It -//! is currently (as of 2023-06-26) deployed on Polkadot and [Kusama (as "Statemine")](https://parachains.info/details/statemine). -//! -//! To construct a valid [`crate::config::Config`] implementation, we need to find out which types to use for `AccountId`, `Hasher`, etc. -//! 
For this, we need to take a look at the source code of Statemint, which is currently a part of the [Cumulus Github repository](https://github.com/paritytech/cumulus). -//! The crate defining the asset hub runtime can be found [here](https://github.com/paritytech/cumulus/tree/master/parachains/runtimes/assets/asset-hub-polkadot). -//! -//! ## `AccountId`, `Hash`, `Hasher` and `Header` -//! -//! For these config types, we need to find out where the parachain runtime implements the `frame_system::Config` trait. -//! Look for a code fragment like `impl frame_system::Config for Runtime { ... }` In the source code. -//! For Statemint it looks like [this](https://github.com/paritytech/cumulus/blob/e2b7ad2061824f490c08df27a922c64f50accd6b/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs#L179) -//! at the time of writing. The `AccountId`, `Hash` and `Header` types of the [frame_system::pallet::Config](https://docs.rs/frame-system/latest/frame_system/pallet/trait.Config.html) -//! correspond to the ones we want to use in our Subxt [crate::Config]. In the Case of Statemint (Asset Hub) they are: -//! -//! - AccountId: `sp_core::crypto::AccountId32` -//! - Hash: `sp_core::H256` -//! - Hasher (type `Hashing` in [frame_system::pallet::Config](https://docs.rs/frame-system/latest/frame_system/pallet/trait.Config.html)): `sp_runtime::traits::BlakeTwo256` -//! - Header: `sp_runtime::generic::Header` -//! -//! Subxt has its own versions of some of these types in order to avoid needing to pull in Substrate dependencies: -//! -//! - `sp_core::crypto::AccountId32` can be swapped with [`crate::utils::AccountId32`]. -//! - `sp_core::H256` is a re-export which subxt also provides as [`crate::config::substrate::H256`]. -//! - `sp_runtime::traits::BlakeTwo256` can be swapped with [`crate::config::substrate::BlakeTwo256`]. -//! - `sp_runtime::generic::Header` can be swapped with [`crate::config::substrate::SubstrateHeader`]. -//! -//! 
Having a look at how those types are implemented can give some clues as to how to implement other custom types that -//! you may need to use as part of your config. -//! -//! ## `Address`, `Signature` -//! -//! A Substrate runtime is typically constructed by using the [frame_support::construct_runtime](https://docs.rs/frame-support/latest/frame_support/macro.construct_runtime.html) macro. -//! In this macro, we need to specify the type of an `UncheckedExtrinsic`. Most of the time, the `UncheckedExtrinsic` will be of the type -//! `sp_runtime::generic::UncheckedExtrinsic`. -//! The generic parameters `Address` and `Signature` specified when declaring the `UncheckedExtrinsic` type -//! are the types for `Address` and `Signature` we should use with our [crate::Config] implementation. This information can -//! also be obtained from the metadata (see [`frame_metadata::v15::ExtrinsicMetadata`]). In case of Statemint (Polkadot Asset Hub) -//! we see the following types being used in `UncheckedExtrinsic`: -//! -//! - Address: `sp_runtime::MultiAddress` -//! - Signature: `sp_runtime::MultiSignature` -//! -//! As above, Subxt has its own versions of these types that can be used instead to avoid pulling in Substrate dependencies. -//! Using the Subxt versions also makes interacting with generated code (which uses them in some places) a little nicer: -//! -//! - `sp_runtime::MultiAddress` can be swapped with [`crate::utils::MultiAddress`]. -//! - `sp_runtime::MultiSignature` can be swapped with [`crate::utils::MultiSignature`]. -//! -//! ## ExtrinsicParams -//! -//! Chains each have a set of "transaction extensions" (formally called "signed extensions") configured. Transaction extensions provide -//! a means to extend how transactions work. Each transaction extension can potentially encode some "extra" data which is sent along with a transaction, as well as some -//! 
"additional" data which is included in the transaction signer payload, but not transmitted along with the transaction. On -//! a node, transaction extensions can then perform additional checks on the submitted transactions to ensure their validity. -//! -//! The `ExtrinsicParams` config type expects to be given an implementation of the [`crate::config::ExtrinsicParams`] trait. -//! Implementations of the [`crate::config::ExtrinsicParams`] trait are handed some parameters from Subxt itself, and can -//! accept arbitrary other `Params` from users, and are then expected to provide this "extra" and "additional" data when asked -//! via the required [`crate::config::ExtrinsicParamsEncoder`] impl. -//! -//! **In most cases, the default [crate::config::DefaultExtrinsicParams] type will work**: it understands the "standard" -//! transaction extensions that are in use, and allows the user to provide things like a tip, and set the extrinsic mortality via -//! [`crate::config::DefaultExtrinsicParamsBuilder`]. It will use the chain metadata to decide which transaction extensions to use -//! and in which order. It will return an error if the chain uses a transaction extension which it doesn't know how to handle. -//! -//! If the chain uses novel transaction extensions (or if you just wish to provide a different interface for users to configure -//! transactions), you can either: -//! -//! 1. Implement a new transaction extension and add it to the list. -//! 2. Implement [`crate::config::DefaultExtrinsicParams`] from scratch. -//! -//! See below for examples of each. -//! -//! ### Finding out which transaction extensions a chain is using. -//! -//! In either case, you'll want to find out which transaction extensions a chain is using. This information can be obtained from -//! the `SignedExtra` parameter of the `UncheckedExtrinsic` of your parachain, which will be a tuple of transaction extensions. -//! 
It can also be obtained from the metadata (see [`frame_metadata::v15::SignedExtensionMetadata`]). -//! -//! For statemint, the transaction extensions look like -//! [this](https://github.com/paritytech/cumulus/blob/d4bb2215bb28ee05159c4c7df1b3435177b5bf4e/parachains/runtimes/assets/asset-hub-polkadot/src/lib.rs#L786): -//! -//! ```rust,ignore -//! pub type SignedExtra = ( -//! frame_system::CheckNonZeroSender, -//! frame_system::CheckSpecVersion, -//! frame_system::CheckTxVersion, -//! frame_system::CheckGenesis, -//! frame_system::CheckEra, -//! frame_system::CheckNonce, -//! frame_system::CheckWeight, -//! pallet_asset_tx_payment::ChargeAssetTxPayment, -//! ); -//! ``` -//! -//! Each element of the `SignedExtra` tuple implements [codec::Encode] and `sp_runtime::traits::SignedExtension` -//! which has an associated type `AdditionalSigned` that also implements [codec::Encode]. Let's look at the underlying types -//! for each tuple element. All zero-sized types have been replaced by `()` for simplicity. -//! -//! | tuple element | struct type | `AdditionalSigned` type | -//! | ------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------- | -//! | [`frame_system::CheckNonZeroSender`](https://docs.rs/frame-system/latest/frame_system/struct.CheckNonZeroSender.html) | () | () | -//! | [`frame_system::CheckSpecVersion`](https://docs.rs/frame-system/latest/frame_system/struct.CheckSpecVersion.html) | () | [u32] | -//! | [`frame_system::CheckTxVersion`](https://docs.rs/frame-system/latest/frame_system/struct.CheckTxVersion.html) | () | [u32] | -//! | [`frame_system::CheckGenesis`](https://docs.rs/frame-system/latest/frame_system/struct.CheckGenesis.html) | () | `Config::Hash` = `sp_core::H256` | -//! 
| [`frame_system::CheckMortality`](https://docs.rs/frame-system/latest/frame_system/struct.CheckMortality.html) | `sp_runtime::generic::Era` | `Config::Hash` = `sp_core::H256` | -//! | [`frame_system::CheckNonce`](https://docs.rs/frame-system/latest/frame_system/struct.CheckNonce.html) | `frame_system::pallet::Config::Index` = u32 | () | -//! | [`frame_system::CheckWeight`](https://docs.rs/frame-system/latest/frame_system/struct.CheckWeight.html) | () | () | -//! | [`frame_system::ChargeAssetTxPayment`](https://docs.rs/frame-system/latest/frame_system/struct.ChargeAssetTxPayment.html) | [pallet_asset_tx_payment::ChargeAssetTxPayment](https://docs.rs/pallet-asset-tx-payment/latest/pallet_asset_tx_payment/struct.ChargeAssetTxPayment.html) | () | -//! -//! All types in the `struct type` column make up the "extra" data that we're expected to provide. All types in the -//! `AdditionalSigned` column make up the "additional" data that we're expected to provide. This information will be useful -//! whether we want to implement [`crate::config::TransactionExtension`] for a transaction extension, or implement -//! [`crate::config::ExtrinsicParams`] from scratch. -//! -//! As it happens, all of the transaction extensions in the table are either already exported in [`crate::config::transaction_extensions`], -//! or they hand back no "additional" or "extra" data. In both of these cases, the default `ExtrinsicParams` configuration will -//! work out of the box. -//! -//! ### Implementing and adding new transaction extensions to the config -//! -//! If you do need to implement a novel transaction extension, then you can implement [`crate::config::transaction_extensions::TransactionExtension`] -//! on a custom type and place it into a new set of transaction extensions, like so: -//! -//! ```rust,ignore -#![doc = include_str ! ("../../../examples/setup_config_transaction_extension.rs")] -//! ``` -//! -//! ### Implementing [`crate::config::ExtrinsicParams`] from scratch -//! -//! 
Alternately, you are free to implement [`crate::config::ExtrinsicParams`] entirely from scratch if you know exactly what "extra" and -//! "additional" data your node needs and would prefer to craft your own interface. -//! -//! Let's see what this looks like (this config won't work on any real node): -//! -//! ```rust,ignore -#![doc = include_str ! ("../../../examples/setup_config_custom.rs")] -//! ``` -//! -//! ### Using a type from the metadata as a config parameter -//! -//! You can also use types that are generated from chain metadata as type parameters of the Config trait. -//! Just make sure all trait bounds are satisfied. This can often be achieved by using custom derives with the subxt macro. -//! For example, the AssetHub Parachain expects tips to include a `MultiLocation`, which is a type we can draw from the metadata. -//! -//! This example shows what using the `MultiLocation` struct as part of your config would look like in subxt: -//! -//! ```rust,ignore -#![doc = include_str ! ("../../../examples/setup_config_assethub.rs")] -//! ``` diff --git a/subxt/src/book/setup/mod.rs b/subxt/src/book/setup/mod.rs deleted file mode 100644 index 3dbcc37fa9..0000000000 --- a/subxt/src/book/setup/mod.rs +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! This modules contains details on setting up Subxt: -//! -//! - [Codegen](codegen) -//! - [Client](client) -//! -//! Alternately, [go back](super). - -pub mod client; -pub mod codegen; -pub mod config; diff --git a/subxt/src/book/usage/blocks.rs b/subxt/src/book/usage/blocks.rs deleted file mode 100644 index ac690167cb..0000000000 --- a/subxt/src/book/usage/blocks.rs +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! # Blocks -//! -//! 
The [blocks API](crate::blocks::BlocksClient) in Subxt unifies many of the other interfaces, and -//! allows you to: -//! -//! - Access information about specific blocks (see [`crate::blocks::BlocksClient::at()`] and -//! [`crate::blocks::BlocksClient::at_latest()`]). -//! - Subscribe to [all](crate::blocks::BlocksClient::subscribe_all()), -//! [best](crate::blocks::BlocksClient::subscribe_best()) or -//! [finalized](crate::blocks::BlocksClient::subscribe_finalized()) blocks as they are produced. -//! **Prefer to subscribe to finalized blocks unless you know what you're doing.** -//! -//! In either case, you'll end up with [`crate::blocks::Block`]'s, from which you can access various -//! information about the block, such as the [header](crate::blocks::Block::header()), -//! [block number](crate::blocks::Block::number()) and [body (the extrinsics)](crate::blocks::Block::extrinsics()). -//! [`crate::blocks::Block`]'s also provide shortcuts to other Subxt APIs that will operate at the -//! given block: -//! -//! - [storage](crate::blocks::Block::storage()), -//! - [events](crate::blocks::Block::events()) -//! - [runtime APIs](crate::blocks::Block::runtime_api()) -//! -//! Aside from these links to other Subxt APIs, the main thing that we can do here is iterate over and -//! decode the extrinsics in a block body. -//! -//! ## Decoding Extrinsics -//! -//! Given a block, you can [download the block body](crate::blocks::Block::extrinsics()) and -//! [iterate over the extrinsics](crate::blocks::Extrinsics::iter) stored within it. The extrinsics yielded are of type -//! [ExtrinsicDetails](crate::blocks::ExtrinsicDetails), which is just a blob of bytes that also stores which -//! pallet and call in that pallet it belongs to. It also contains information about signed extensions that -//! have been used for submitting this extrinsic. -//! -//! To use the extrinsic, you probably want to decode it into a concrete Rust type. These Rust types representing -//! 
extrinsics from different pallets can be generated from metadata using the subxt macro or the CLI tool. -//! -//! When decoding the extrinsic into a static type you have two options: -//! -//! ### Statically decode the extrinsics into [the root extrinsic type](crate::blocks::ExtrinsicDetails::as_root_extrinsic()) -//! -//! The root extrinsic type generated by subxt is a Rust enum with one variant for each pallet. Each of these -//! variants has a field that is another enum whose variants cover all calls of the respective pallet. -//! If the extrinsic bytes are valid and your metadata matches the chain's metadata, decoding the bytes of an extrinsic into -//! this root extrinsic type should always succeed. -//! -//! This example shows how to subscribe to blocks and decode the extrinsics in each block into the root extrinsic type. -//! Once we get hold of the [ExtrinsicDetails](crate::blocks::ExtrinsicDetails), we can decode it statically or dynamically. -//! We can also access details about the extrinsic, including the associated events and transaction extensions. -//! -//! ```rust,ignore -#![doc = include_str!("../../../examples/blocks_subscribing.rs")] -//! ``` -//! -//! ### Statically decode the extrinsic into [a specific pallet call](crate::blocks::ExtrinsicDetails::as_extrinsic()) -//! -//! This is useful if you are expecting a specific extrinsic to be part of some block. If the extrinsic you try to decode -//! is a different extrinsic, an `Ok(None)` value is returned from [`as_extrinsic::()`](crate::blocks::ExtrinsicDetails::as_extrinsic()); -//! -//! If you are only interested in finding specific extrinsics in a block, you can also [iterate over all of them](crate::blocks::Extrinsics::find), -//! get only [the first one](crate::blocks::Extrinsics::find_first), or [the last one](crate::blocks::Extrinsics::find_last). -//! -//! The following example monitors `TransferKeepAlive` extrinsics on the Polkadot network. -//! 
We statically decode them and access the [tip](crate::blocks::ExtrinsicTransactionExtensions::tip()) and -//! [account nonce](crate::blocks::ExtrinsicTransactionExtensions::nonce()) transaction extensions. -//! -//! ```rust,ignore -#![doc = include_str!("../../../examples/block_decoding_static.rs")] -//! ``` -//! -//! ### Dynamically decode the extrinsic -//! -//! Sometimes you might use subxt with metadata that is not known at compile time. In this case, you do not -//! have access to a statically generated interface module that contains the relevant Rust types. You can -//! [decode ExtrinsicDetails dynamically](crate::blocks::ExtrinsicDetails::decode_as_fields()), which gives -//! you access to its fields as a [scale value composite](scale_value::Composite). The following example -//! looks for signed extrinsics on the Polkadot network and retrieves their pallet name, variant name, data -//! fields and transaction extensions dynamically. Notice how we do not need to use code generation via the -//! subxt macro. The only fixed component we provide is the [PolkadotConfig](crate::config::PolkadotConfig). -//! Other than that it works in a chain-agnostic way: -//! -//! ```rust,ignore -#![doc = include_str!("../../../examples/block_decoding_dynamic.rs")] -//! ``` -//! -//! ## Decoding transaction extensions -//! -//! Extrinsics can contain transaction extensions. The transaction extensions can be different across chains. -//! The [Config](crate::Config) implementation for your chain defines which transaction extensions you expect. -//! Once you get hold of the [ExtrinsicDetails](crate::blocks::ExtrinsicDetails) for an extrinsic you are interested in, -//! you can try to [get its transaction extensions](crate::blocks::ExtrinsicDetails::transaction_extensions()). -//! These are only available on V4 signed extrinsics or V5 general extrinsics. You can try to -//! 
[find a specific transaction extension](crate::blocks::ExtrinsicTransactionExtensions::find), in the returned -//! [transaction extensions](crate::blocks::ExtrinsicTransactionExtensions). -//! -//! Subxt also provides utility functions to get the [tip](crate::blocks::ExtrinsicTransactionExtensions::tip()) and -//! the [account nonce](crate::blocks::ExtrinsicTransactionExtensions::nonce()) associated with an extrinsic, given -//! its transaction extensions. If you prefer to do things dynamically you can get the data of the transaction extension -//! as a [scale value](crate::blocks::ExtrinsicTransactionExtension::value()). -//! diff --git a/subxt/src/book/usage/constants.rs b/subxt/src/book/usage/constants.rs deleted file mode 100644 index 2fade78785..0000000000 --- a/subxt/src/book/usage/constants.rs +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! # Constants -//! -//! There are various constants stored in a node; the types and values of these are defined in a -//! runtime, and can only change when the runtime is updated. Much like [`super::storage`], we can -//! query these using Subxt by taking the following steps: -//! -//! 1. [Constructing a constant query](#constructing-a-query). -//! 2. [Submitting the query to get back the associated value](#submitting-it). -//! -//! ## Constructing a constant query -//! -//! We can use the statically generated interface to build constant queries: -//! -//! ```rust,no_run,standalone_crate -//! #[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale")] -//! pub mod polkadot {} -//! -//! let constant_query = polkadot::constants().system().block_length(); -//! ``` -//! -//! Alternately, we can dynamically construct a constant query. A dynamic query needs the return -//! type to be specified, where we can use [`crate::dynamic::Value`] if unsure: -//! -//! 
```rust,no_run,standalone_crate -//! use subxt::dynamic::Value; -//! -//! let storage_query = subxt::dynamic::constant::("System", "BlockLength"); -//! ``` -//! -//! ## Submitting it -//! -//! Call [`crate::constants::ConstantsClient::at()`] to return and decode the constant into the -//! type given by the address, or [`crate::constants::ConstantsClient::bytes_at()`] to return the -//! raw bytes for some constant. -//! -//! Constant values are pulled directly out of the node metadata which Subxt has -//! already acquired, and so this function requires no network access and is available from a -//! [`crate::OfflineClient`]. -//! -//! Here's an example using a static query: -//! -//! ```rust,ignore -#![doc = include_str!("../../../examples/constants_static.rs")] -//! ``` -//! -//! And here's one using a dynamic query: -//! -//! ```rust,ignore -#![doc = include_str!("../../../examples/constants_dynamic.rs")] -//! ``` -//! diff --git a/subxt/src/book/usage/custom_values.rs b/subxt/src/book/usage/custom_values.rs deleted file mode 100644 index 8f27e9fe7f..0000000000 --- a/subxt/src/book/usage/custom_values.rs +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! # Custom Values -//! -//! Substrate-based chains can expose custom values in their metadata. -//! Each of these values: -//! -//! - can be accessed by a unique __name__. -//! - refers to a concrete __type__ stored in the metadata. -//! - contains a scale encoded __value__ of that type. -//! -//! ## Getting a custom value -//! -//! First, you must construct an address to access a custom value. This can be either: -//! - a raw [`str`] which assumes the return type to be the dynamic [`crate::dynamic::Value`] type, -//! - created via [`dynamic`](crate::custom_values::dynamic) function whereby you set the return type -//! that you want back, -//! 
- created via statically generated addresses as part of the `#[subxt]` macro which define the return type. -//! -//! With an address, use [`at`](crate::custom_values::CustomValuesClient::at) to access and decode specific values, and -//! [`bytes_at`](crate::custom_values::CustomValuesClient::bytes_at) to access the raw bytes. -//! -//! ## Examples -//! -//! Dynamically accessing a custom value using a [`str`] to select which one: -//! -//! ```rust,ignore -//! use subxt::{OnlineClient, PolkadotConfig, ext::scale_decode::DecodeAsType}; -//! use subxt::dynamic::Value; -//! -//! let api = OnlineClient::::new().await?; -//! let custom_value_client = api.custom_values(); -//! let foo: Value = custom_value_client.at("foo")?; -//! ``` -//! -//! Use the [`dynamic`](crate::custom_values::dynamic) function to select the return type: -//! -//! ```rust,ignore -//! use subxt::{OnlineClient, PolkadotConfig, ext::scale_decode::DecodeAsType}; -//! -//! #[derive(Decode, DecodeAsType, Debug)] -//! struct Foo { -//! n: u8, -//! b: bool, -//! } -//! -//! let api = OnlineClient::::new().await?; -//! let custom_value_client = api.custom_values(); -//! let custom_value_addr = subxt::custom_values::dynamic::("foo"); -//! let foo: Foo = custom_value_client.at(&custom_value_addr)?; -//! ``` -//! -//! Alternatively we also provide a statically generated api for custom values: -//! -//! ```rust,ignore -//! #[subxt::subxt(runtime_metadata_path = "some_metadata.scale")] -//! pub mod interface {} -//! -//! let static_address = interface::custom().foo(); -//! -//! let api = OnlineClient::::new().await?; -//! let custom_value_client = api.custom_values(); -//! -//! // Now the `at()` function already decodes the value into the Foo type: -//! let foo = custom_value_client.at(&static_address)?; -//! ``` -//! 
diff --git a/subxt/src/book/usage/events.rs b/subxt/src/book/usage/events.rs deleted file mode 100644 index e851f38b95..0000000000 --- a/subxt/src/book/usage/events.rs +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! # Events -//! -//! In the process of adding extrinsics to a block, they are executed. When extrinsics are executed, -//! they normally produce events describing what's happening (at the very least, an event dictating whether -//! the extrinsic has succeeded or failed). The node may also emit some events of its own as the block is -//! processed. -//! -//! Events live in a single location in node storage which is overwritten at each block. Normal nodes tend to -//! keep a snapshot of the state at a small number of previous blocks, so you can sometimes access -//! older events by using [`crate::events::EventsClient::at()`] and providing an older block hash. -//! -//! When we submit transactions using Subxt, methods like [`crate::tx::TxProgress::wait_for_finalized_success()`] -//! return [`crate::blocks::ExtrinsicEvents`], which can be used to iterate and inspect the events produced -//! by that transaction being executed. We can also access _all_ of the events produced in a single block using one -//! of these two interfaces: -//! -//! ```rust,no_run,standalone_crate -//! # #[tokio::main] -//! # async fn main() -> Result<(), Box> { -//! use subxt::client::OnlineClient; -//! use subxt::config::PolkadotConfig; -//! -//! // Create client: -//! let client = OnlineClient::::new().await?; -//! -//! // Get events from the latest block (use .at() to specify a block hash): -//! let events = client.blocks().at_latest().await?.events().await?; -//! // We can use this shorthand too: -//! let events = client.events().at_latest().await?; -//! # Ok(()) -//! # } -//! ``` -//! -//! 
Once we've loaded our events, we can iterate all events or search for specific events via -//! methods like [`crate::events::Events::iter()`] and [`crate::events::Events::find()`]. See -//! [`crate::events::Events`] and [`crate::events::EventDetails`] for more information. -//! -//! ## Example -//! -//! Here's an example which puts this all together: -//! -//! ```rust,ignore -#![doc = include_str!("../../../examples/events.rs")] -//! ``` -//! diff --git a/subxt/src/book/usage/light_client.rs b/subxt/src/book/usage/light_client.rs deleted file mode 100644 index 8dbba244fc..0000000000 --- a/subxt/src/book/usage/light_client.rs +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! # Light Client -//! -//! The light client based interface uses _Smoldot_ to connect to a _chain_, rather than an individual -//! node. This means that you don't have to trust a specific node when interacting with some chain. -//! -//! This feature is currently unstable. Use the `unstable-light-client` feature flag to enable it. -//! To use this in WASM environments, enable the `web` feature flag and disable the "native" one. -//! -//! To connect to a blockchain network, the Light Client requires a trusted sync state of the network, -//! known as a _chain spec_. One way to obtain this is by making a `sync_state_genSyncSpec` RPC call to a -//! trusted node belonging to the chain that you wish to interact with. -//! -//! Subxt exposes a utility method to obtain the chain spec: [`crate::utils::fetch_chainspec_from_rpc_node()`]. -//! Alternately, you can manually make an RPC call to `sync_state_genSyncSpec` like so (assuming a node running -//! locally on port 9933): -//! -//! ```bash -//! 
curl -H "Content-Type: application/json" -d '{"id":1, "jsonrpc":"2.0", "method": "sync_state_genSyncSpec", "params":[true]}' http://localhost:9933/ | jq .result > chain_spec.json -//! ``` -//! -//! ## Examples -//! -//! ### Basic Example -//! -//! This basic example uses some already-known chain specs to connect to a relay chain and parachain -//! and stream information about their finalized blocks: -//! -//! ```rust,ignore -#![doc = include_str!("../../../examples/light_client_basic.rs")] -//! ``` -//! -//! ### Connecting to a local node -//! -//! This example connects to a local chain and submits a transaction. To run this, you first need -//! to have a local polkadot node running using the following command: -//! -//! ```text -//! polkadot --dev --node-key 0000000000000000000000000000000000000000000000000000000000000001 -//! ``` -//! -//! Then, the following code will download a chain spec from this local node, alter the bootnodes -//! to point only to the local node, and then submit a transaction through it. -//! -//! ```rust,ignore -#![doc = include_str!("../../../examples/light_client_local_node.rs")] -//! ``` -//! diff --git a/subxt/src/book/usage/mod.rs b/subxt/src/book/usage/mod.rs deleted file mode 100644 index 77eaf21dd5..0000000000 --- a/subxt/src/book/usage/mod.rs +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! This module contains examples of using Subxt; follow the links for more: -//! -//! - [Transactions](transactions) -//! - [Storage](storage) -//! - [Events](events) -//! - [Constants](constants) -//! - [Blocks](blocks) -//! - [Runtime APIs](runtime_apis) -//! - [Unstable Light Client](light_client) -//! - [Custom Values](custom_values) -//! - [RPC calls](rpc) -//! -//! Alternately, [go back](super). 
- -pub mod blocks; -pub mod constants; -pub mod custom_values; -pub mod events; -pub mod light_client; -pub mod rpc; -pub mod runtime_apis; -pub mod storage; -pub mod transactions; diff --git a/subxt/src/book/usage/rpc.rs b/subxt/src/book/usage/rpc.rs deleted file mode 100644 index b3f6468d6f..0000000000 --- a/subxt/src/book/usage/rpc.rs +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! # RPC calls -//! -//! The RPC interface is provided by the [`subxt_rpcs`] crate but re-exposed here. We have: -//! -//! - [`crate::backend::rpc::RpcClient`] and [`crate::backend::rpc::RpcClientT`]: the underlying type and trait -//! which provides a basic RPC client. -//! - [`crate::backend::legacy::rpc_methods`] and [`crate::backend::chain_head::rpc_methods`]: RPC methods that -//! can be instantiated with an RPC client. -//! -//! See [`subxt_rpcs`] or [`crate::ext::subxt_rpcs`] for more. -//! -//! # Example -//! -//! Here's an example which calls some legacy JSON-RPC methods, and reuses the same connection to run a full Subxt client -//! -//! ```rust,ignore -#![doc = include_str!("../../../examples/rpc_legacy.rs")] -//! ``` diff --git a/subxt/src/book/usage/runtime_apis.rs b/subxt/src/book/usage/runtime_apis.rs deleted file mode 100644 index d944e051ac..0000000000 --- a/subxt/src/book/usage/runtime_apis.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! # Runtime API interface -//! -//! The Runtime API interface allows Subxt to call runtime APIs exposed by certain pallets in order -//! to obtain information. Much like [`super::storage`] and [`super::transactions`], making a runtime -//! call to a node and getting the response back takes the following steps: -//! -//! 1. 
[Constructing a runtime call](#constructing-a-runtime-call) -//! 2. [Submitting it to get back the response](#submitting-it) -//! -//! **Note:** Runtime APIs are only available when using V15 metadata, which is currently unstable. -//! You'll need to use `subxt metadata --version unstable` command to download the unstable V15 metadata, -//! and activate the `unstable-metadata` feature in Subxt for it to also use this metadata from a node. The -//! metadata format is unstable because it may change and break compatibility with Subxt at any moment, so -//! use at your own risk. -//! -//! ## Constructing a runtime call -//! -//! We can use the statically generated interface to build runtime calls: -//! -//! ```rust,no_run,standalone_crate -//! #[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -//! pub mod polkadot {} -//! -//! let runtime_call = polkadot::apis().metadata().metadata_versions(); -//! ``` -//! -//! Alternately, we can dynamically construct a runtime call. The input type can be a tuple or -//! vec or valid types implementing [`scale_encode::EncodeAsType`], and the output can be anything -//! implementing [`scale_decode::DecodeAsType`]: -//! -//! ```rust,no_run -//! use subxt::dynamic::Value; -//! -//! let runtime_call = subxt::dynamic::runtime_api_call::<(), Vec>( -//! "Metadata", -//! "metadata_versions", -//! () -//! ); -//! ``` -//! -//! All valid runtime calls implement [`crate::runtime_api::Payload`], a trait which -//! describes how to encode the runtime call arguments and what return type to decode from the -//! response. -//! -//! ## Submitting it -//! -//! Runtime calls can be handed to [`crate::runtime_api::RuntimeApi::call()`], which will submit -//! them and hand back the associated response. -//! -//! ### Making a static Runtime API call -//! -//! The easiest way to make a runtime API call is to use the statically generated interface. -//! -//! 
```rust,ignore -#![doc = include_str!("../../../examples/runtime_apis_static.rs")] -//! ``` -//! -//! ### Making a dynamic Runtime API call -//! -//! If you'd prefer to construct the call at runtime, you can do this using the -//! [`crate::dynamic::runtime_api_call`] method. -//! -//! ```rust,ignore -#![doc = include_str!("../../../examples/runtime_apis_dynamic.rs")] -//! ``` -//! -//! ### Making a raw call -//! -//! This is generally discouraged in favour of one of the above, but may be necessary (especially if -//! the node you're talking to does not yet serve V15 metadata). Here, you must manually encode -//! the argument bytes and manually provide a type for the response bytes to be decoded into. -//! -//! ```rust,ignore -#![doc = include_str!("../../../examples/runtime_apis_raw.rs")] -//! ``` -//! diff --git a/subxt/src/book/usage/storage.rs b/subxt/src/book/usage/storage.rs deleted file mode 100644 index eba5225052..0000000000 --- a/subxt/src/book/usage/storage.rs +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! # Storage -//! -//! A Substrate based chain can be seen as a key/value database which starts off at some initial -//! state, and is modified by the extrinsics in each block. This database is referred to as the -//! node storage. With Subxt, you can query this key/value storage with the following steps: -//! -//! 1. [Constructing a storage query](#constructing-a-storage-query). -//! 2. [Submitting the query to get back the associated entry](#submitting-it). -//! 3. [Fetching](#fetching-storage-entries) or [iterating](#iterating-storage-entries) over that -//! entry to retrieve the value or values within it. -//! -//! ## Constructing a storage query -//! -//! We can use the statically generated interface to build storage queries: -//! -//! ```rust,no_run,standalone_crate -//! 
#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -//! pub mod polkadot {} -//! -//! let storage_query = polkadot::storage().system().account(); -//! ``` -//! -//! Alternately, we can dynamically construct a storage query. A dynamic query needs the input -//! and return value types to be specified, where we can use [`crate::dynamic::Value`] if unsure. -//! -//! ```rust,no_run,standalone_crate -//! use subxt::dynamic::Value; -//! -//! let storage_query = subxt::dynamic::storage::<(Value,), Value>("System", "Account"); -//! ``` -//! -//! ## Submitting it -//! -//! Storage queries can be handed to various functions in [`crate::storage::StorageClientAt`] in order to -//! obtain the associated values (also referred to as storage entries) back. -//! -//! The core API here is [`crate::storage::StorageClientAt::entry()`], which takes a query and looks up the -//! corresponding storage entry, from which you can then fetch or iterate over the values contained within. -//! [`crate::storage::StorageClientAt::fetch()`] and [`crate::storage::StorageClientAt::iter()`] are shorthand -//! for this. -//! -//! When you wish to manually query some entry, [`crate::storage::StorageClientAt::fetch_raw()`] exists to take -//! in raw bytes pointing at some storage value, and return the value bytes if possible. [`crate::storage::StorageClientAt::storage_version()`] -//! and [`crate::storage::StorageClientAt::runtime_wasm_code()`] use this to retrieve the version of some storage API -//! and the current Runtime WASM blob respectively. -//! -//! ### Fetching storage entries -//! -//! The simplest way to access storage entries is to construct a query and then call either -//! [`crate::storage::StorageClientAt::fetch()`]: -//! -//! ```rust,ignore -#![doc = include_str!("../../../examples/storage_fetch.rs")] -//! ``` -//! -//! For completeness, below is an example using a dynamic query instead. Dynamic queries can define the types that -//! 
they wish to accept inputs and decode the return value into ([`crate::dynamic::Value`] can be used here anywhere we -//! are not sure of the specific types). -//! -//! ```rust,ignore -#![doc = include_str!("../../../examples/storage_fetch_dynamic.rs")] -//! ``` -//! -//! ### Iterating storage entries -//! -//! Many storage entries are maps of values; as well as fetching individual values, it's possible to -//! iterate over all of the values stored at that location: -//! -//! ```rust,ignore -#![doc = include_str!("../../../examples/storage_iterating.rs")] -//! ``` -//! -//! Here's the same logic but using dynamically constructed values instead: -//! -//! ```rust,ignore -#![doc = include_str!("../../../examples/storage_iterating_dynamic.rs")] -//! ``` -//! diff --git a/subxt/src/book/usage/transactions.rs b/subxt/src/book/usage/transactions.rs deleted file mode 100644 index c427acd5de..0000000000 --- a/subxt/src/book/usage/transactions.rs +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! # Transactions -//! -//! A transaction is an extrinsic that's signed (ie it originates from a given address). The purpose -//! of extrinsics is to modify the node storage in a deterministic way, and so being able to submit -//! transactions to a node is one of the core features of Subxt. -//! -//! > Note: the documentation tends to use the terms _extrinsic_ and _transaction_ interchangeably; -//! > An extrinsic is some data that can be added to a block, and is either signed (a _transaction_) -//! > or unsigned (an _inherent_). Subxt can construct either, but overwhelmingly you'll need to -//! > sign the payload you'd like to submit. -//! -//! Submitting a transaction to a node consists of the following steps: -//! -//! 1. [Constructing a transaction payload to submit](#constructing-a-transaction-payload). -//! 2. [Signing it](#signing-it). -//! 3. 
[Submitting it (optionally with some additional parameters)](#submitting-it). -//! -//! We'll look at each of these steps in turn. -//! -//! ## Constructing a transaction payload -//! -//! We can use the statically generated interface to build transaction payloads: -//! -//! ```rust,no_run,standalone_crate -//! #[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] -//! pub mod polkadot {} -//! -//! let remark = "Hello there".as_bytes().to_vec(); -//! let tx_payload = polkadot::tx().system().remark(remark); -//! ``` -//! -//! > If you're not sure what types to import and use to build a given payload, you can use the -//! > `subxt` CLI tool to generate the interface by using something like `subxt codegen | rustfmt > -//! > interface.rs`, to see what types and things are available (or even just to use directly -//! > instead of the [`#[subxt]`](crate::subxt) macro). -//! -//! Alternately, we can dynamically construct a transaction payload. This will not be type checked or -//! validated until it's submitted: -//! -//! ```rust,no_run,standalone_crate -//! use subxt::dynamic::Value; -//! -//! let tx_payload = subxt::dynamic::tx("System", "remark", vec![ -//! Value::from_bytes("Hello there") -//! ]); -//! ``` -//! -//! The [`crate::dynamic::Value`] type is a dynamic type much like a `serde_json::Value` but instead -//! represents any type of data that can be SCALE encoded or decoded. It can be serialized, -//! deserialized and parsed from/to strings. -//! -//! A valid transaction payload is just something that implements the [`crate::tx::Payload`] trait; -//! you can implement this trait on your own custom types if the built-in ones are not suitable for -//! your needs. -//! -//! ## Signing it -//! -//! You'll normally need to sign an extrinsic to prove that it originated from an account that you -//! control. To do this, you will typically first create a [`crate::tx::Signer`] instance, which tells -//! 
Subxt who the extrinsic is from, and takes care of signing the relevant details to prove this. -//! -//! There are two main ways to create a compatible signer instance: -//! 1. The `subxt_signer` crate provides a WASM compatible implementation of [`crate::tx::Signer`] -//! for chains which require sr25519 or ecdsa signatures (requires the `subxt` feature to be enabled). -//! 2. Alternately, implement your own [`crate::tx::Signer`] instance by wrapping it in a new type pattern. -//! -//! Going for 1 leads to fewer dependencies being imported and WASM compatibility out of the box via -//! the `web` feature flag. Going for 2 is useful if you're already using the Substrate dependencies or -//! need additional signing algorithms that `subxt_signer` doesn't support, and don't care about WASM -//! compatibility. -//! -//! Because 2 is more complex and requires more code, we'll focus on 1 here. -//! For 2, see the example in `subxt/examples/substrate_compat_signer.rs` for how -//! you can integrate things like sp_core's signer in subxt. -//! -//! Let's go through how to create a signer using the `subxt_signer` crate: -//! -//! ```rust,standalone_crate -//! use subxt::config::PolkadotConfig; -//! use std::str::FromStr; -//! -//! use subxt_signer::{SecretUri, sr25519}; -//! -//! // Get hold of a `Signer` for a test account: -//! let alice = sr25519::dev::alice(); -//! -//! // Or generate a keypair, here from a SURI: -//! let uri = SecretUri::from_str("vessel ladder alter error federal sibling chat ability sun glass valve picture/0/1///Password") -//! .expect("valid URI"); -//! let keypair = sr25519::Keypair::from_uri(&uri) -//! .expect("valid keypair"); -//!``` -//! -//! After initializing the signer, let's also go through how to create a transaction and sign it: -//! -//! ```rust,no_run,standalone_crate -//! # #[tokio::main] -//! # async fn main() -> Result<(), Box<dyn std::error::Error>> { -//! use subxt::client::OnlineClient; -//! use subxt::config::PolkadotConfig; -//! 
use subxt::dynamic::Value; -//! -//! // Create client: -//! let client = OnlineClient::::new().await?; -//! -//! // Create a dummy tx payload to sign: -//! let payload = subxt::dynamic::tx("System", "remark", vec![ -//! Value::from_bytes("Hello there") -//! ]); -//! -//! // Construct the tx but don't sign it. The account nonce here defaults to 0. -//! // You can use `create_partial` to fetch the correct nonce. -//! let mut partial_tx = client.tx().create_partial_offline( -//! &payload, -//! Default::default() -//! )?; -//! -//! // Fetch the payload that needs to be signed: -//! let signer_payload = partial_tx.signer_payload(); -//! -//! // ... At this point, we can hand off the `signer_payload` to be signed externally. -//! // Ultimately we need to be given back a `signature` (or really, anything -//! // that can be SCALE encoded) and an `address`: -//! let signature; -//! let account_id; -//! # use subxt::tx::Signer; -//! # let signer = subxt_signer::sr25519::dev::alice(); -//! # signature = signer.sign(&signer_payload).into(); -//! # account_id = signer.public_key().to_account_id(); -//! -//! // Now we can build an tx, which one can call `submit` or `submit_and_watch` -//! // on to submit to a node and optionally watch the status. -//! let tx = partial_tx.sign_with_account_and_signature( -//! &account_id, -//! &signature -//! ); -//! # Ok(()) -//! # } -//! ``` -//! -//! ## Submitting it -//! -//! Once we have signed the transaction, we need to submit it. -//! -//! ### The high level API -//! -//! The highest level approach to doing this is to call -//! [`crate::tx::TxClient::sign_and_submit_then_watch_default`]. This hands back a -//! [`crate::tx::TxProgress`] struct which will monitor the transaction status. We can then call -//! [`crate::tx::TxProgress::wait_for_finalized_success()`] to wait for this transaction to make it -//! into a finalized block, check for an `ExtrinsicSuccess` event, and then hand back the events for -//! inspection. This looks like: -//! 
-//! ```rust,ignore -#![doc = include_str!("../../../examples/tx_basic.rs")] -//! ``` -//! -//! ### Providing transaction parameters -//! -//! If you'd like to provide parameters (such as mortality) to the transaction, you can use -//! [`crate::tx::TxClient::sign_and_submit_then_watch`] instead: -//! -//! ```rust,ignore -#![doc = include_str!("../../../examples/tx_with_params.rs")] -//! ``` -//! -//! This example doesn't wait for the transaction to be included in a block; it just submits it and -//! hopes for the best! -//! -//! ### Boxing transaction payloads -//! -//! Transaction payloads can be boxed so that they all share a common type and can be stored together. -//! -//! ```rust,ignore -#![doc = include_str!("../../../examples/tx_boxed.rs")] -//! ``` -//! -//! ### Custom handling of transaction status updates -//! -//! If you'd like more control or visibility over exactly which status updates are being emitted for -//! the transaction, you can monitor them as they are emitted and react however you choose: -//! -//! ```rust,ignore -#![doc = include_str!("../../../examples/tx_status_stream.rs")] -//! ``` -//! -//! ### Signing transactions externally -//! -//! Subxt also allows you to get hold of the signer payload and hand that off to something else to be -//! signed. The signature can then be provided back to Subxt to build the final transaction to submit: -//! -//! ```rust,ignore -#![doc = include_str!("../../../examples/tx_partial.rs")] -//! ``` -//! -//! Take a look at the API docs for [`crate::tx::TxProgress`], [`crate::tx::TxStatus`] and -//! [`crate::tx::TxInBlock`] for more options. -//! 
diff --git a/new/src/client.rs b/subxt/src/client.rs similarity index 88% rename from new/src/client.rs rename to subxt/src/client.rs index 76e3cafaad..fdbf9a942d 100644 --- a/new/src/client.rs +++ b/subxt/src/client.rs @@ -14,12 +14,14 @@ use core::marker::PhantomData; use subxt_metadata::Metadata; pub use offline_client::{OfflineClient, OfflineClientAtBlock, OfflineClientAtBlockT}; -pub use online_client::{OnlineClient, OnlineClientAtBlock, OnlineClientAtBlockT}; +pub use online_client::{ + BlockNumberOrRef, OnlineClient, OnlineClientAtBlock, OnlineClientAtBlockT, +}; /// This represents a client at a specific block number. #[derive(Clone, Debug)] pub struct ClientAtBlock { - client: Client, + pub(crate) client: Client, marker: PhantomData, } @@ -38,11 +40,17 @@ where T: Config, Client: OfflineClientAtBlockT, { - /// Construct and submit transactions. + /// Construct and submit transactions. This is a + /// shorthand to [`Self::transactions()`]. pub fn tx(&self) -> TransactionsClient<'_, T, Client> { TransactionsClient::new(&self.client) } + /// Construct and submit transactions. + pub fn transactions(&self) -> TransactionsClient<'_, T, Client> { + TransactionsClient::new(&self.client) + } + /// Access storage at this block. pub fn storage(&self) -> StorageClient<'_, T, Client> { StorageClient::new(&self.client) diff --git a/subxt/src/client/mod.rs b/subxt/src/client/mod.rs deleted file mode 100644 index 8b9c917eec..0000000000 --- a/subxt/src/client/mod.rs +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! This module provides two clients that can be used to work with -//! transactions, storage and events. The [`OfflineClient`] works -//! entirely offline and can be passed to any function that doesn't -//! require network access. The [`OnlineClient`] requires network -//! access. 
- -mod offline_client; -mod online_client; - -pub use offline_client::{OfflineClient, OfflineClientT}; -pub use online_client::{ - ClientRuntimeUpdater, OnlineClient, OnlineClientT, RuntimeUpdaterStream, Update, -}; -pub use subxt_core::client::{ClientState, RuntimeVersion}; diff --git a/subxt/src/client/offline_client.rs b/subxt/src/client/offline_client.rs index adcb413d4d..8d8c502c14 100644 --- a/subxt/src/client/offline_client.rs +++ b/subxt/src/client/offline_client.rs @@ -1,203 +1,106 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. +use crate::client::ClientAtBlock; +use crate::config::{Config, HashFor, Hasher}; +use crate::error::OfflineClientAtBlockError; +use crate::metadata::{ArcMetadata, Metadata}; -use crate::custom_values::CustomValuesClient; -use crate::{ - Metadata, - blocks::BlocksClient, - config::{Config, HashFor}, - constants::ConstantsClient, - events::EventsClient, - runtime_api::RuntimeApiClient, - storage::StorageClient, - tx::TxClient, - view_functions::ViewFunctionsClient, -}; - -use derive_where::derive_where; -use std::sync::Arc; -use subxt_core::client::{ClientState, RuntimeVersion}; - -/// A trait representing a client that can perform -/// offline-only actions. -pub trait OfflineClientT: Clone + Send + Sync + 'static { - /// Return the provided [`Metadata`]. - fn metadata(&self) -> Metadata; - - /// Return the provided genesis hash. - fn genesis_hash(&self) -> HashFor; - - /// Return the provided [`RuntimeVersion`]. - fn runtime_version(&self) -> RuntimeVersion; - - /// Return the hasher used on the chain. - fn hasher(&self) -> T::Hasher; - - /// Return the [subxt_core::client::ClientState] (metadata, runtime version and genesis hash). 
- fn client_state(&self) -> ClientState { - ClientState { - genesis_hash: self.genesis_hash(), - runtime_version: self.runtime_version(), - metadata: self.metadata(), - } - } - - /// Work with transactions. - fn tx(&self) -> TxClient { - TxClient::new(self.clone()) - } - - /// Work with events. - fn events(&self) -> EventsClient { - EventsClient::new(self.clone()) - } - - /// Work with storage. - fn storage(&self) -> StorageClient { - StorageClient::new(self.clone()) - } - - /// Access constants. - fn constants(&self) -> ConstantsClient { - ConstantsClient::new(self.clone()) - } - - /// Work with blocks. - fn blocks(&self) -> BlocksClient { - BlocksClient::new(self.clone()) - } - - /// Work with runtime APIs. - fn runtime_api(&self) -> RuntimeApiClient { - RuntimeApiClient::new(self.clone()) - } - - /// Work with View Functions. - fn view_functions(&self) -> ViewFunctionsClient { - ViewFunctionsClient::new(self.clone()) - } - - /// Work this custom types. - fn custom_values(&self) -> CustomValuesClient { - CustomValuesClient::new(self.clone()) - } -} - -/// A client that is capable of performing offline-only operations. -/// Can be constructed as long as you can populate the required fields. -#[derive_where(Debug, Clone)] +#[derive(Clone, Debug)] pub struct OfflineClient { - inner: Arc>, - hasher: T::Hasher, + /// The configuration for this client. + config: T, } impl OfflineClient { - /// Construct a new [`OfflineClient`], providing - /// the necessary runtime and compile-time arguments. - pub fn new( - genesis_hash: HashFor, - runtime_version: RuntimeVersion, - metadata: impl Into, - ) -> OfflineClient { - let metadata = metadata.into(); - let hasher = ::new(&metadata); + /// Create a new [`OfflineClient`] with the given configuration. + pub fn new(config: T) -> Self { + OfflineClient { config } + } - OfflineClient { + /// Pick the block height at which to operate. This references data from the + /// [`OfflineClient`] it's called on, and so cannot outlive it. 
+ pub fn at_block( + &self, + block_number: impl Into, + ) -> Result>, OfflineClientAtBlockError> { + let block_number = block_number.into(); + let (spec_version, transaction_version) = self + .config + .spec_and_transaction_version_for_block_number(block_number) + .ok_or(OfflineClientAtBlockError::SpecVersionNotFound { block_number })?; + + let metadata = self + .config + .metadata_for_spec_version(spec_version) + .ok_or(OfflineClientAtBlockError::MetadataNotFound { spec_version })?; + + let genesis_hash = self.config.genesis_hash(); + + let hasher = ::new(&metadata); + + let offline_client_at_block = OfflineClientAtBlock { + metadata, + block_number, + genesis_hash, + spec_version, hasher, - inner: Arc::new(ClientState { - genesis_hash, - runtime_version, - metadata, - }), - } - } + transaction_version, + }; - /// Return the genesis hash. - pub fn genesis_hash(&self) -> HashFor { - self.inner.genesis_hash - } - - /// Return the runtime version. - pub fn runtime_version(&self) -> RuntimeVersion { - self.inner.runtime_version - } - - /// Return the [`Metadata`] used in this client. - pub fn metadata(&self) -> Metadata { - self.inner.metadata.clone() - } - - /// Return the hasher used for the chain. - pub fn hasher(&self) -> T::Hasher { - self.hasher - } - - // Just a copy of the most important trait methods so that people - // don't need to import the trait for most things: - - /// Work with transactions. - pub fn tx(&self) -> TxClient { - >::tx(self) - } - - /// Work with events. - pub fn events(&self) -> EventsClient { - >::events(self) - } - - /// Work with storage. - pub fn storage(&self) -> StorageClient { - >::storage(self) - } - - /// Access constants. - pub fn constants(&self) -> ConstantsClient { - >::constants(self) - } - - /// Work with blocks. - pub fn blocks(&self) -> BlocksClient { - >::blocks(self) - } - - /// Work with runtime APIs. - pub fn runtime_api(&self) -> RuntimeApiClient { - >::runtime_api(self) - } - - /// Work with View Functions. 
- pub fn view_functions(&self) -> ViewFunctionsClient { - >::view_functions(self) - } - - /// Access custom types - pub fn custom_values(&self) -> CustomValuesClient { - >::custom_values(self) + Ok(ClientAtBlock::new(offline_client_at_block)) } } -impl OfflineClientT for OfflineClient { - fn genesis_hash(&self) -> HashFor { - self.genesis_hash() - } - fn runtime_version(&self) -> RuntimeVersion { - self.runtime_version() - } - fn metadata(&self) -> Metadata { - self.metadata() - } - fn hasher(&self) -> T::Hasher { - self.hasher() - } +#[derive(Clone)] +pub struct OfflineClientAtBlock { + metadata: ArcMetadata, + block_number: u64, + genesis_hash: Option>, + spec_version: u32, + hasher: T::Hasher, + transaction_version: u32, } -// For ergonomics; cloning a client is deliberately fairly cheap (via Arc), -// so this allows users to pass references to a client rather than explicitly -// cloning. This is partly for consistency with OnlineClient, which can be -// easily converted into an OfflineClient for ergonomics. -impl<'a, T: Config> From<&'a OfflineClient> for OfflineClient { - fn from(c: &'a OfflineClient) -> Self { - c.clone() +/// This represents an offline-only client at a specific block. +#[doc(hidden)] +pub trait OfflineClientAtBlockT: Clone { + /// Get a reference to the metadata appropriate for this block. + fn metadata_ref(&self) -> &Metadata; + /// Get a clone of the metadata appropriate for this block. + fn metadata(&self) -> ArcMetadata; + /// The block number we're operating at. + fn block_number(&self) -> u64; + /// Return the genesis hash for the chain if it is known. + fn genesis_hash(&self) -> Option>; + /// The spec version at the current block. + fn spec_version(&self) -> u32; + /// Return a hasher that works at the current block. + fn hasher(&self) -> &T::Hasher; + /// The transaction version at the current block. 
+ /// + /// Note: This is _not_ the same as the transaction version that + /// is encoded at the beginning of transactions (ie 4 or 5). + fn transaction_version(&self) -> u32; +} + +impl OfflineClientAtBlockT for OfflineClientAtBlock { + fn metadata_ref(&self) -> &Metadata { + &self.metadata + } + fn metadata(&self) -> ArcMetadata { + self.metadata.clone() + } + fn block_number(&self) -> u64 { + self.block_number + } + fn genesis_hash(&self) -> Option> { + self.genesis_hash + } + fn spec_version(&self) -> u32 { + self.spec_version + } + fn transaction_version(&self) -> u32 { + self.transaction_version + } + fn hasher(&self) -> &T::Hasher { + &self.hasher } } diff --git a/subxt/src/client/online_client.rs b/subxt/src/client/online_client.rs index bdc9b03e69..5fbfc48b35 100644 --- a/subxt/src/client/online_client.rs +++ b/subxt/src/client/online_client.rs @@ -1,580 +1,593 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. 
+mod block_number_or_ref; +mod blocks; -use super::{OfflineClient, OfflineClientT}; -use crate::custom_values::CustomValuesClient; -use crate::{ - Metadata, - backend::{Backend, BackendExt, StreamOfResults, legacy::LegacyBackend, rpc::RpcClient}, - blocks::{BlockRef, BlocksClient}, - config::{Config, HashFor}, - constants::ConstantsClient, - error::{BackendError, OnlineClientError, RuntimeUpdateeApplyError, RuntimeUpdaterError}, - events::EventsClient, - runtime_api::RuntimeApiClient, - storage::StorageClient, - tx::TxClient, - view_functions::ViewFunctionsClient, -}; -use derive_where::derive_where; -use futures::TryFutureExt; -use futures::future; -use std::sync::{Arc, RwLock}; -use subxt_core::client::{ClientState, RuntimeVersion}; +use super::ClientAtBlock; +use super::OfflineClientAtBlockT; +use crate::backend::{Backend, BlockRef, CombinedBackend}; +use crate::config::{Config, HashFor, Hasher, Header}; +use crate::error::{BlocksError, OnlineClientAtBlockError}; +use crate::metadata::{ArcMetadata, Metadata}; +use blocks::Blocks; +use codec::{Compact, Decode, Encode}; +use core::marker::PhantomData; +use frame_decode::helpers::ToTypeRegistry; +use frame_metadata::{RuntimeMetadata, RuntimeMetadataPrefixed}; +use scale_info_legacy::TypeRegistrySet; +use std::future::Future; +use std::sync::Arc; +use subxt_rpcs::RpcClient; -/// A trait representing a client that can perform -/// online actions. -pub trait OnlineClientT: OfflineClientT { - /// Return a backend that can be used to communicate with a node. - fn backend(&self) -> &dyn Backend; +#[cfg(feature = "jsonrpsee")] +use crate::error::OnlineClientError; + +pub use block_number_or_ref::BlockNumberOrRef; + +/// A client which exposes the means to decode historic data on a chain online. +#[derive(Clone, Debug)] +pub struct OnlineClient { + inner: Arc>, } -/// A client that can be used to perform API calls (that is, either those -/// requiring an [`OfflineClientT`] or those requiring an [`OnlineClientT`]). 
-#[derive_where(Clone)] -pub struct OnlineClient { - inner: Arc>>, +struct OnlineClientInner { + /// The configuration for this client. + config: T, + /// Chain genesis hash. Needed to construct transactions, + /// so we obtain it up front on constructing this. + genesis_hash: HashFor, + /// The RPC methods used to communicate with the node. backend: Arc>, } -#[derive_where(Debug)] -struct Inner { - genesis_hash: HashFor, - runtime_version: RuntimeVersion, - metadata: Metadata, - hasher: T::Hasher, -} - -impl std::fmt::Debug for OnlineClient { +impl std::fmt::Debug for OnlineClientInner { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("Client") - .field("rpc", &"RpcClient") - .field("inner", &self.inner) + f.debug_struct("OnlineClientInner") + .field("config", &"") + .field("backend", &"Arc") .finish() } } -// The default constructors assume Jsonrpsee. -#[cfg(feature = "jsonrpsee")] -#[cfg_attr(docsrs, doc(cfg(feature = "jsonrpsee")))] impl OnlineClient { /// Construct a new [`OnlineClient`] using default settings which /// point to a locally running node on `ws://127.0.0.1:9944`. - pub async fn new() -> Result, OnlineClientError> { + /// + /// **Note:** This will only work if the local node is an archive node. + #[cfg(all(feature = "jsonrpsee", feature = "runtime"))] + pub async fn new(config: T) -> Result, OnlineClientError> { let url = "ws://127.0.0.1:9944"; - OnlineClient::from_url(url).await + OnlineClient::from_url(config, url).await } /// Construct a new [`OnlineClient`], providing a URL to connect to. 
- pub async fn from_url(url: impl AsRef) -> Result, OnlineClientError> { - subxt_rpcs::utils::validate_url_is_secure(url.as_ref())?; - OnlineClient::from_insecure_url(url).await + #[cfg(all(feature = "jsonrpsee", feature = "runtime"))] + pub async fn from_url( + config: T, + url: impl AsRef, + ) -> Result, OnlineClientError> { + let url_str = url.as_ref(); + let url = url::Url::parse(url_str).map_err(|_| OnlineClientError::InvalidUrl { + url: url_str.to_string(), + })?; + if !Self::is_url_secure(&url) { + return Err(OnlineClientError::RpcError(subxt_rpcs::Error::InsecureUrl( + url_str.to_string(), + ))); + } + OnlineClient::from_insecure_url(config, url).await } /// Construct a new [`OnlineClient`], providing a URL to connect to. /// /// Allows insecure URLs without SSL encryption, e.g. (http:// and ws:// URLs). + #[cfg(all(feature = "jsonrpsee", feature = "runtime"))] pub async fn from_insecure_url( + config: T, url: impl AsRef, ) -> Result, OnlineClientError> { - let client = RpcClient::from_insecure_url(url).await?; - let backend = LegacyBackend::builder().build(client); - OnlineClient::from_backend(Arc::new(backend)).await + let rpc_client = RpcClient::from_insecure_url(url).await?; + OnlineClient::from_rpc_client(config, rpc_client).await + } + + fn is_url_secure(url: &url::Url) -> bool { + let secure_scheme = url.scheme() == "https" || url.scheme() == "wss"; + let is_localhost = url.host().is_some_and(|e| match e { + url::Host::Domain(e) => e == "localhost", + url::Host::Ipv4(e) => e.is_loopback(), + url::Host::Ipv6(e) => e.is_loopback(), + }); + secure_scheme || is_localhost } -} -impl OnlineClient { /// Construct a new [`OnlineClient`] by providing an [`RpcClient`] to drive the connection. /// This will use the current default [`Backend`], which may change in future releases. 
+ #[cfg(all(feature = "jsonrpsee", feature = "runtime"))] pub async fn from_rpc_client( + config: T, rpc_client: impl Into, ) -> Result, OnlineClientError> { let rpc_client = rpc_client.into(); - let backend = Arc::new(LegacyBackend::builder().build(rpc_client)); - OnlineClient::from_backend(backend).await - } - - /// Construct a new [`OnlineClient`] by providing an RPC client along with the other - /// necessary details. This will use the current default [`Backend`], which may change - /// in future releases. - /// - /// # Warning - /// - /// This is considered the most primitive and also error prone way to - /// instantiate a client; the genesis hash, metadata and runtime version provided will - /// entirely determine which node and blocks this client will be able to interact with, - /// and whether it will be able to successfully do things like submit transactions. - /// - /// If you're unsure what you're doing, prefer one of the alternate methods to instantiate - /// a client. - pub fn from_rpc_client_with( - genesis_hash: HashFor, - runtime_version: RuntimeVersion, - metadata: impl Into, - rpc_client: impl Into, - ) -> Result, OnlineClientError> { - let rpc_client = rpc_client.into(); - let backend = Arc::new(LegacyBackend::builder().build(rpc_client)); - OnlineClient::from_backend_with(genesis_hash, runtime_version, metadata, backend) + let backend = CombinedBackend::builder() + .build_with_background_driver(rpc_client) + .await + .map_err(OnlineClientError::CannotBuildCombinedBackend)?; + let backend: Arc> = Arc::new(backend); + OnlineClient::from_backend(config, backend).await } /// Construct a new [`OnlineClient`] by providing an underlying [`Backend`] - /// implementation to power it. Other details will be obtained from the chain. - pub async fn from_backend>( - backend: Arc, + /// implementation to power it. 
+ pub async fn from_backend( + config: T, + backend: impl Into>>, ) -> Result, OnlineClientError> { - let latest_block = backend - .latest_finalized_block_ref() - .await - .map_err(OnlineClientError::CannotGetLatestFinalizedBlock)?; - - let (genesis_hash, runtime_version, metadata) = future::join3( - backend + let backend = backend.into(); + let genesis_hash = match config.genesis_hash() { + Some(hash) => hash, + None => backend .genesis_hash() - .map_err(OnlineClientError::CannotGetGenesisHash), - backend - .current_runtime_version() - .map_err(OnlineClientError::CannotGetCurrentRuntimeVersion), - OnlineClient::fetch_metadata(&*backend, latest_block.hash()) - .map_err(OnlineClientError::CannotFetchMetadata), - ) - .await; - - OnlineClient::from_backend_with(genesis_hash?, runtime_version?, metadata?, backend) - } - - /// Construct a new [`OnlineClient`] by providing all of the underlying details needed - /// to make it work. - /// - /// # Warning - /// - /// This is considered the most primitive and also error prone way to - /// instantiate a client; the genesis hash, metadata and runtime version provided will - /// entirely determine which node and blocks this client will be able to interact with, - /// and whether it will be able to successfully do things like submit transactions. - /// - /// If you're unsure what you're doing, prefer one of the alternate methods to instantiate - /// a client. 
- pub fn from_backend_with>( - genesis_hash: HashFor, - runtime_version: RuntimeVersion, - metadata: impl Into, - backend: Arc, - ) -> Result, OnlineClientError> { - use subxt_core::config::Hasher; - - let metadata = metadata.into(); - let hasher = T::Hasher::new(&metadata); + .await + .map_err(OnlineClientError::CannotGetGenesisHash)?, + }; Ok(OnlineClient { - inner: Arc::new(RwLock::new(Inner { + inner: Arc::new(OnlineClientInner { + config, genesis_hash, - runtime_version, - metadata, - hasher, - })), - backend, + backend: backend.into(), + }), }) } - /// Fetch the metadata from substrate using the runtime API. - async fn fetch_metadata( - backend: &dyn Backend, - block_hash: HashFor, - ) -> Result { - #[cfg(feature = "unstable-metadata")] - { - /// The unstable metadata version number. - const UNSTABLE_METADATA_VERSION: u32 = u32::MAX; - - // Try to fetch the latest unstable metadata, if that fails fall back to - // fetching the latest stable metadata. - match backend - .metadata_at_version(UNSTABLE_METADATA_VERSION, block_hash) - .await - { - Ok(bytes) => Ok(bytes), - Err(_) => OnlineClient::fetch_latest_stable_metadata(backend, block_hash).await, - } - } - - #[cfg(not(feature = "unstable-metadata"))] - OnlineClient::fetch_latest_stable_metadata(backend, block_hash).await - } - - /// Fetch the latest stable metadata from the node. - async fn fetch_latest_stable_metadata( - backend: &dyn Backend, - block_hash: HashFor, - ) -> Result { - // The metadata versions we support in Subxt, from newest to oldest. - use subxt_metadata::SUPPORTED_METADATA_VERSIONS; - - // Try to fetch each version that we support in order from newest to oldest. - for version in SUPPORTED_METADATA_VERSIONS { - if let Ok(bytes) = backend.metadata_at_version(version, block_hash).await { - return Ok(bytes); - } - } - - // If that fails, fetch the metadata V14 using the old API. 
- backend.legacy_metadata(block_hash).await - } - - /// Create an object which can be used to keep the runtime up to date - /// in a separate thread. + /// Obtain a stream of all blocks imported by the node. /// - /// # Example - /// - /// ```rust,no_run,standalone_crate - /// # #[tokio::main] - /// # async fn main() { - /// use subxt::{ OnlineClient, PolkadotConfig }; - /// - /// let client = OnlineClient::::new().await.unwrap(); - /// - /// // high level API. - /// - /// let update_task = client.updater(); - /// tokio::spawn(async move { - /// update_task.perform_runtime_updates().await; - /// }); - /// - /// - /// // low level API. - /// - /// let updater = client.updater(); - /// tokio::spawn(async move { - /// let mut update_stream = updater.runtime_updates().await.unwrap(); - /// - /// while let Ok(update) = update_stream.next().await { - /// let version = update.runtime_version().spec_version; - /// - /// match updater.apply_update(update) { - /// Ok(()) => { - /// println!("Upgrade to version: {} successful", version) - /// } - /// Err(e) => { - /// println!("Upgrade to version {} failed {:?}", version, e); - /// } - /// }; - /// } - /// }); - /// # } - /// ``` - pub fn updater(&self) -> ClientRuntimeUpdater { - ClientRuntimeUpdater(self.clone()) - } + /// **Note:** You probably want to use [`Self::stream_blocks()`] most of + /// the time. Blocks returned here may be pruned at any time and become inaccessible, + /// leading to errors when trying to work with them. + pub async fn stream_all_blocks(&self) -> Result, BlocksError> { + // We need a hasher to know how to hash things. Thus, we need metadata to instantiate + // the hasher, so let's use the current block. + let current_block = self + .at_current_block() + .await + .map_err(BlocksError::CannotGetCurrentBlock)?; + let hasher = current_block.client.hasher.clone(); - /// Return the hasher configured for hashing blocks and extrinsics. 
- pub fn hasher(&self) -> T::Hasher { - self.inner.read().expect("shouldn't be poisoned").hasher - } - - /// Return the [`Metadata`] used in this client. - pub fn metadata(&self) -> Metadata { - let inner = self.inner.read().expect("shouldn't be poisoned"); - inner.metadata.clone() - } - - /// Change the [`Metadata`] used in this client. - /// - /// # Warning - /// - /// Setting custom metadata may leave Subxt unable to work with certain blocks, - /// subscribe to latest blocks or submit valid transactions. - pub fn set_metadata(&self, metadata: impl Into) { - let mut inner = self.inner.write().expect("shouldn't be poisoned"); - inner.metadata = metadata.into(); - } - - /// Return the genesis hash. - pub fn genesis_hash(&self) -> HashFor { - let inner = self.inner.read().expect("shouldn't be poisoned"); - inner.genesis_hash - } - - /// Change the genesis hash used in this client. - /// - /// # Warning - /// - /// Setting a custom genesis hash may leave Subxt unable to - /// submit valid transactions. - pub fn set_genesis_hash(&self, genesis_hash: HashFor) { - let mut inner = self.inner.write().expect("shouldn't be poisoned"); - inner.genesis_hash = genesis_hash; - } - - /// Return the runtime version. - pub fn runtime_version(&self) -> RuntimeVersion { - let inner = self.inner.read().expect("shouldn't be poisoned"); - inner.runtime_version - } - - /// Change the [`RuntimeVersion`] used in this client. - /// - /// # Warning - /// - /// Setting a custom runtime version may leave Subxt unable to - /// submit valid transactions. - pub fn set_runtime_version(&self, runtime_version: RuntimeVersion) { - let mut inner = self.inner.write().expect("shouldn't be poisoned"); - inner.runtime_version = runtime_version; - } - - /// Return an RPC client to make raw requests with. - pub fn backend(&self) -> &dyn Backend { - &*self.backend - } - - /// Return an offline client with the same configuration as this. 
- pub fn offline(&self) -> OfflineClient { - let inner = self.inner.read().expect("shouldn't be poisoned"); - OfflineClient::new( - inner.genesis_hash, - inner.runtime_version, - inner.metadata.clone(), - ) - } - - // Just a copy of the most important trait methods so that people - // don't need to import the trait for most things: - - /// Work with transactions. - pub fn tx(&self) -> TxClient { - >::tx(self) - } - - /// Work with events. - pub fn events(&self) -> EventsClient { - >::events(self) - } - - /// Work with storage. - pub fn storage(&self) -> StorageClient { - >::storage(self) - } - - /// Access constants. - pub fn constants(&self) -> ConstantsClient { - >::constants(self) - } - - /// Work with blocks. - pub fn blocks(&self) -> BlocksClient { - >::blocks(self) - } - - /// Work with runtime API. - pub fn runtime_api(&self) -> RuntimeApiClient { - >::runtime_api(self) - } - - /// Work with View Functions. - pub fn view_functions(&self) -> ViewFunctionsClient { - >::view_functions(self) - } - - /// Access custom types. - pub fn custom_values(&self) -> CustomValuesClient { - >::custom_values(self) - } -} - -impl OfflineClientT for OnlineClient { - fn metadata(&self) -> Metadata { - self.metadata() - } - fn genesis_hash(&self) -> HashFor { - self.genesis_hash() - } - fn runtime_version(&self) -> RuntimeVersion { - self.runtime_version() - } - fn hasher(&self) -> T::Hasher { - self.hasher() - } - // This is provided by default, but we can optimise here and only lock once: - fn client_state(&self) -> ClientState { - let inner = self.inner.read().expect("shouldn't be poisoned"); - ClientState { - genesis_hash: inner.genesis_hash, - runtime_version: inner.runtime_version, - metadata: inner.metadata.clone(), - } - } -} - -impl OnlineClientT for OnlineClient { - fn backend(&self) -> &dyn Backend { - &*self.backend - } -} - -/// Client wrapper for performing runtime updates. See [`OnlineClient::updater()`] -/// for example usage. 
-pub struct ClientRuntimeUpdater(OnlineClient); - -impl ClientRuntimeUpdater { - fn is_runtime_version_different(&self, new: &RuntimeVersion) -> bool { - let curr = self.0.inner.read().expect("shouldn't be poisoned"); - &curr.runtime_version != new - } - - fn do_update(&self, update: Update) { - let mut writable = self.0.inner.write().expect("shouldn't be poisoned"); - writable.metadata = update.metadata; - writable.runtime_version = update.runtime_version; - } - - /// Tries to apply a new update. - pub fn apply_update(&self, update: Update) -> Result<(), RuntimeUpdateeApplyError> { - if !self.is_runtime_version_different(&update.runtime_version) { - return Err(RuntimeUpdateeApplyError::SameVersion); - } - - self.do_update(update); - - Ok(()) - } - - /// Performs runtime updates indefinitely unless encountering an error. - /// - /// *Note:* This will run indefinitely until it errors, so the typical usage - /// would be to run it in a separate background task. - pub async fn perform_runtime_updates(&self) -> Result<(), RuntimeUpdaterError> { - // Obtain an update subscription to further detect changes in the runtime version of the node. - let mut runtime_version_stream = self.runtime_updates().await?; - - loop { - let update = runtime_version_stream.next().await?; - - // This only fails if received the runtime version is the same the current runtime version - // which might occur because that runtime subscriptions in substrate sends out the initial - // value when they created and not only when runtime upgrades occurs. - // Thus, fine to ignore here as it strictly speaking isn't really an error - let _ = self.apply_update(update); - } - } - - /// Low-level API to get runtime updates as a stream but it's doesn't check if the - /// runtime version is newer or updates the runtime. - /// - /// Instead that's up to the user of this API to decide when to update and - /// to perform the actual updating. 
- pub async fn runtime_updates(&self) -> Result, RuntimeUpdaterError> { let stream = self - .0 - .backend() - .stream_runtime_version() + .inner + .backend + .stream_all_block_headers(hasher) .await - .map_err(RuntimeUpdaterError::CannotStreamRuntimeVersion)?; + .map_err(BlocksError::CannotGetBlockHeaderStream)?; - Ok(RuntimeUpdaterStream { - stream, - client: self.0.clone(), - }) + Ok(Blocks::from_headers_stream(self.clone(), stream)) } -} -/// Stream to perform runtime upgrades. -pub struct RuntimeUpdaterStream { - stream: StreamOfResults, - client: OnlineClient, -} - -impl RuntimeUpdaterStream { - /// Wait for the next runtime update. - pub async fn next(&mut self) -> Result { - let runtime_version = self - .stream - .next() + /// Obtain a stream of blocks imported by the node onto the current best fork. + /// + /// **Note:** You probably want to use [`Self::stream_blocks()`] most of + /// the time. Blocks returned here may be pruned at any time and become inaccessible, + /// leading to errors when trying to work with them. + pub async fn stream_best_blocks(&self) -> Result, BlocksError> { + // We need a hasher to know how to hash things. Thus, we need metadata to instantiate + // the hasher, so let's use the current block. + let current_block = self + .at_current_block() .await - .ok_or(RuntimeUpdaterError::UnexpectedEndOfUpdateStream)? 
- .map_err(RuntimeUpdaterError::CannotGetNextRuntimeVersion)?; + .map_err(BlocksError::CannotGetCurrentBlock)?; + let hasher = current_block.client.hasher.clone(); - let at = wait_runtime_upgrade_in_finalized_block(&self.client, &runtime_version).await?; - - let metadata = OnlineClient::fetch_metadata(self.client.backend(), at.hash()) + let stream = self + .inner + .backend + .stream_best_block_headers(hasher) .await - .map_err(RuntimeUpdaterError::CannotFetchNewMetadata)?; + .map_err(BlocksError::CannotGetBlockHeaderStream)?; - Ok(Update { + Ok(Blocks::from_headers_stream(self.clone(), stream)) + } + + /// Obtain a stream of finalized blocks. + pub async fn stream_blocks(&self) -> Result, BlocksError> { + // We need a hasher to know how to hash things. Thus, we need metadata to instantiate + // the hasher, so let's use the current block. + let current_block = self + .at_current_block() + .await + .map_err(BlocksError::CannotGetCurrentBlock)?; + let hasher = current_block.client.hasher.clone(); + + let stream = self + .inner + .backend + .stream_finalized_block_headers(hasher) + .await + .map_err(BlocksError::CannotGetBlockHeaderStream)?; + + Ok(Blocks::from_headers_stream(self.clone(), stream)) + } + + /// Instantiate a client to work at the current finalized block _at the time of instantiation_. + /// This does not track new blocks. + pub async fn at_current_block( + &self, + ) -> Result>, OnlineClientAtBlockError> { + let latest_block = self + .inner + .backend + .latest_finalized_block_ref() + .await + .map_err(|e| OnlineClientAtBlockError::CannotGetCurrentBlock { reason: e })?; + + self.at_block(latest_block).await + } + + /// Instantiate a client for working at a specific block. + pub async fn at_block( + &self, + number_or_hash: impl Into>, + ) -> Result>, OnlineClientAtBlockError> { + let number_or_hash = number_or_hash.into(); + + // We are given either a block hash or number. We need both. 
+ let (block_ref, block_number) = match number_or_hash { + BlockNumberOrRef::BlockRef(block_ref) => { + let block_hash = block_ref.hash(); + let block_header = self + .inner + .backend + .block_header(block_hash) + .await + .map_err(|e| OnlineClientAtBlockError::CannotGetBlockHeader { + block_hash: block_hash.into(), + reason: e, + })? + .ok_or(OnlineClientAtBlockError::BlockHeaderNotFound { + block_hash: block_hash.into(), + })?; + (block_ref, block_header.number()) + } + BlockNumberOrRef::Number(block_number) => { + let block_ref = self + .inner + .backend + .block_number_to_hash(block_number) + .await + .map_err(|e| OnlineClientAtBlockError::CannotGetBlockHash { + block_number, + reason: e, + })? + .ok_or(OnlineClientAtBlockError::BlockNotFound { block_number })?; + (block_ref, block_number) + } + }; + + self.at_block_hash_and_number(block_ref, block_number).await + } + + /// Instantiate a client for working at a specific block. This takes a block hash/ref _and_ the + /// corresponding block number. When both are available, this saves an RPC call to obtain one from + /// the other. + /// + /// **Warning:** If the block hash and number do not align, then things will go wrong. Prefer to + /// use [`Self::at_block`] if in any doubt. + pub async fn at_block_hash_and_number( + &self, + block_ref: impl Into>>, + block_number: u64, + ) -> Result>, OnlineClientAtBlockError> { + let block_ref = block_ref.into(); + let block_hash = block_ref.hash(); + + // Obtain the spec version so that we know which metadata to use at this block. + // Obtain the transaction version because it's required for constructing extrinsics. 
+ let (spec_version, transaction_version) = match self + .inner + .config + .spec_and_transaction_version_for_block_number(block_number) + { + Some(version) => version, + None => { + let spec_version_bytes = self + .inner + .backend + .call("Core_version", None, block_hash) + .await + .map_err(|e| OnlineClientAtBlockError::CannotGetSpecVersion { + block_hash: block_hash.into(), + reason: e, + })?; + + #[derive(codec::Decode)] + struct SpecVersionHeader { + _spec_name: String, + _impl_name: String, + _authoring_version: u32, + spec_version: u32, + _impl_version: u32, + _apis: Vec<([u8; 8], u32)>, + transaction_version: u32, + } + let version = + SpecVersionHeader::decode(&mut &spec_version_bytes[..]).map_err(|e| { + OnlineClientAtBlockError::CannotDecodeSpecVersion { + block_hash: block_hash.into(), + reason: e, + } + })?; + (version.spec_version, version.transaction_version) + } + }; + + // Obtain the metadata for the block. Allow our config to cache it. + let metadata = match self.inner.config.metadata_for_spec_version(spec_version) { + Some(metadata) => metadata, + None => { + let metadata: Metadata = + match get_metadata(&*self.inner.backend, block_hash).await? { + m @ RuntimeMetadata::V0(_) + | m @ RuntimeMetadata::V1(_) + | m @ RuntimeMetadata::V2(_) + | m @ RuntimeMetadata::V3(_) + | m @ RuntimeMetadata::V4(_) + | m @ RuntimeMetadata::V5(_) + | m @ RuntimeMetadata::V6(_) + | m @ RuntimeMetadata::V7(_) => { + return Err(OnlineClientAtBlockError::UnsupportedMetadataVersion { + block_hash: block_hash.into(), + version: m.version(), + }); + } + RuntimeMetadata::V8(m) => { + let types = get_legacy_types(self, &m, spec_version)?; + Metadata::from_v8(&m, &types).map_err(|e| { + OnlineClientAtBlockError::CannotConvertLegacyMetadata { + block_hash: block_hash.into(), + metadata_version: 8, + reason: e, + } + })? 
+ } + RuntimeMetadata::V9(m) => { + let types = get_legacy_types(self, &m, spec_version)?; + Metadata::from_v9(&m, &types).map_err(|e| { + OnlineClientAtBlockError::CannotConvertLegacyMetadata { + block_hash: block_hash.into(), + metadata_version: 9, + reason: e, + } + })? + } + RuntimeMetadata::V10(m) => { + let types = get_legacy_types(self, &m, spec_version)?; + Metadata::from_v10(&m, &types).map_err(|e| { + OnlineClientAtBlockError::CannotConvertLegacyMetadata { + block_hash: block_hash.into(), + metadata_version: 10, + reason: e, + } + })? + } + RuntimeMetadata::V11(m) => { + let types = get_legacy_types(self, &m, spec_version)?; + Metadata::from_v11(&m, &types).map_err(|e| { + OnlineClientAtBlockError::CannotConvertLegacyMetadata { + block_hash: block_hash.into(), + metadata_version: 11, + reason: e, + } + })? + } + RuntimeMetadata::V12(m) => { + let types = get_legacy_types(self, &m, spec_version)?; + Metadata::from_v12(&m, &types).map_err(|e| { + OnlineClientAtBlockError::CannotConvertLegacyMetadata { + block_hash: block_hash.into(), + metadata_version: 12, + reason: e, + } + })? + } + RuntimeMetadata::V13(m) => { + let types = get_legacy_types(self, &m, spec_version)?; + Metadata::from_v13(&m, &types).map_err(|e| { + OnlineClientAtBlockError::CannotConvertLegacyMetadata { + block_hash: block_hash.into(), + metadata_version: 13, + reason: e, + } + })? 
+ } + RuntimeMetadata::V14(m) => Metadata::from_v14(m).map_err(|e| { + OnlineClientAtBlockError::CannotConvertModernMetadata { + block_hash: block_hash.into(), + metadata_version: 14, + reason: e, + } + })?, + RuntimeMetadata::V15(m) => Metadata::from_v15(m).map_err(|e| { + OnlineClientAtBlockError::CannotConvertModernMetadata { + block_hash: block_hash.into(), + metadata_version: 15, + reason: e, + } + })?, + RuntimeMetadata::V16(m) => Metadata::from_v16(m).map_err(|e| { + OnlineClientAtBlockError::CannotConvertModernMetadata { + block_hash: block_hash.into(), + metadata_version: 16, + reason: e, + } + })?, + }; + let metadata = Arc::new(metadata); + self.inner + .config + .set_metadata_for_spec_version(spec_version, metadata.clone()); + metadata + } + }; + + let online_client_at_block = OnlineClientAtBlock { + client: self.clone(), + hasher: ::new(&metadata), metadata, - runtime_version, + block_ref, + block_number, + spec_version, + transaction_version, + }; + + Ok(ClientAtBlock { + client: online_client_at_block, + marker: PhantomData, }) } } -/// Represents the state when a runtime upgrade occurred. -pub struct Update { - runtime_version: RuntimeVersion, - metadata: Metadata, +/// This represents an online client at a specific block. +#[doc(hidden)] +pub trait OnlineClientAtBlockT: OfflineClientAtBlockT { + /// Return the RPC methods we'll use to interact with the node. + fn backend(&self) -> &dyn Backend; + /// Return the block hash for the current block. + fn block_hash(&self) -> HashFor; + /// Point at a new block. + fn at_block( + &self, + number_or_hash: BlockNumberOrRef, + ) -> impl Future, OnlineClientAtBlockError>>; } -impl Update { - /// Get the runtime version. - pub fn runtime_version(&self) -> &RuntimeVersion { - &self.runtime_version - } +/// The inner type providing the necessary data to work online at a specific block. 
+#[derive(Clone)] +pub struct OnlineClientAtBlock { + client: OnlineClient, + metadata: ArcMetadata, + hasher: T::Hasher, + block_ref: BlockRef>, + block_number: u64, + spec_version: u32, + transaction_version: u32, +} - /// Get the metadata. - pub fn metadata(&self) -> &Metadata { +impl OnlineClientAtBlockT for OnlineClientAtBlock { + fn backend(&self) -> &dyn Backend { + &*self.client.inner.backend + } + fn block_hash(&self) -> HashFor { + self.block_ref.hash() + } + async fn at_block( + &self, + number_or_hash: BlockNumberOrRef, + ) -> Result, OnlineClientAtBlockError> { + self.client.at_block(number_or_hash).await + } +} + +impl OfflineClientAtBlockT for OnlineClientAtBlock { + fn metadata_ref(&self) -> &Metadata { &self.metadata } + fn metadata(&self) -> ArcMetadata { + self.metadata.clone() + } + fn block_number(&self) -> u64 { + self.block_number + } + fn genesis_hash(&self) -> Option> { + Some(self.client.inner.genesis_hash) + } + fn spec_version(&self) -> u32 { + self.spec_version + } + fn transaction_version(&self) -> u32 { + self.transaction_version + } + fn hasher(&self) -> &T::Hasher { + &self.hasher + } } -/// Helper to wait until the runtime upgrade is applied on at finalized block. 
-async fn wait_runtime_upgrade_in_finalized_block( - client: &OnlineClient, - runtime_version: &RuntimeVersion, -) -> Result>, RuntimeUpdaterError> { - let hasher = client +fn get_legacy_types<'a, T: Config, Md: ToTypeRegistry>( + client: &'a OnlineClient, + metadata: &Md, + spec_version: u32, +) -> Result, OnlineClientAtBlockError> { + let mut types = client .inner - .read() - .expect("Lock shouldn't be poisoned") - .hasher; + .config + .legacy_types_for_spec_version(spec_version) + .ok_or(OnlineClientAtBlockError::MissingLegacyTypes)?; - let mut block_sub = client - .backend() - .stream_finalized_block_headers(hasher) - .await - .map_err(RuntimeUpdaterError::CannotStreamFinalizedBlocks)?; + // Extend the types with information from the metadata (ie event/error/call enums): + let additional_types = frame_decode::helpers::type_registry_from_metadata(metadata) + .map_err(|e| OnlineClientAtBlockError::CannotInjectMetadataTypes { parse_error: e })?; + types.prepend(additional_types); - let block_ref = loop { - let (_, block_ref) = block_sub - .next() - .await - .ok_or(RuntimeUpdaterError::UnexpectedEndOfBlockStream)? - .map_err(RuntimeUpdaterError::CannotGetNextFinalizedBlock)?; - - let addr = - crate::dynamic::storage::<(), scale_value::Value>("System", "LastRuntimeUpgrade"); - - let client_at = client.storage().at(block_ref.hash()); - let value = client_at - .entry(addr) - // The storage `system::lastRuntimeUpgrade` should always exist. - // - .map_err(|_| RuntimeUpdaterError::CantFindSystemLastRuntimeUpgrade)? - .fetch(()) - .await - .map_err(RuntimeUpdaterError::CantFetchLastRuntimeUpgrade)? - .decode_as::() - .map_err(RuntimeUpdaterError::CannotDecodeLastRuntimeUpgrade)?; - - #[derive(scale_decode::DecodeAsType)] - struct LastRuntimeUpgrade { - spec_version: u32, - } - - // We are waiting for the chain to have the same spec version - // as sent out via the runtime subscription. 
- if value.spec_version == runtime_version.spec_version { - break block_ref; - } - }; - - Ok(block_ref) + Ok(types) +} + +async fn get_metadata( + backend: &dyn Backend, + block_hash: HashFor, +) -> Result { + // First, try to use the "modern" metadata APIs to get the most recent version we can. + let version_to_get = backend + .call("Metadata_metadata_versions", None, block_hash) + .await + .ok() + .and_then(|res| >::decode(&mut &res[..]).ok()) + .and_then(|versions| { + // We want to filter out the "unstable" version, which is represented by u32::MAX. + versions.into_iter().filter(|v| *v != u32::MAX).max() + }); + + // We had success calling the above API, so we expect the "modern" metadata API to work. + if let Some(version_to_get) = version_to_get { + let version_bytes = version_to_get.encode(); + let rpc_response = backend + .call( + "Metadata_metadata_at_version", + Some(&version_bytes), + block_hash, + ) + .await + .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata { + block_hash: block_hash.into(), + reason: format!("Error calling Metadata_metadata_at_version: {e}"), + })?; + + // Option because we may have asked for a version that doesn't exist. Compact because we get back a Vec + // of the metadata bytes, and the Vec is preceded by it's compact encoded length. The actual bytes are then + // decoded as a `RuntimeMetadataPrefixed`, after this. + let (_, metadata) = , RuntimeMetadataPrefixed)>>::decode(&mut &rpc_response[..]) + .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata { + block_hash: block_hash.into(), + reason: format!("Error decoding response for Metadata_metadata_at_version: {e}"), + })? + .ok_or_else(|| OnlineClientAtBlockError::CannotGetMetadata { + block_hash: block_hash.into(), + reason: format!("No metadata returned for the latest version from Metadata_metadata_versions ({version_to_get})"), + })?; + + return Ok(metadata.1); + } + + // We didn't get a version from Metadata_metadata_versions, so fall back to the "old" API. 
+ let metadata_bytes = backend + .call("Metadata_metadata", None, block_hash) + .await + .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata { + block_hash: block_hash.into(), + reason: format!("Error calling Metadata_metadata: {e}"), + })?; + + let (_, metadata) = <(Compact, RuntimeMetadataPrefixed)>::decode(&mut &metadata_bytes[..]) + .map_err(|e| OnlineClientAtBlockError::CannotGetMetadata { + block_hash: block_hash.into(), + reason: format!("Error decoding response for Metadata_metadata: {e}"), + })?; + + Ok(metadata.1) } diff --git a/new/src/client/online_client/block_number_or_ref.rs b/subxt/src/client/online_client/block_number_or_ref.rs similarity index 100% rename from new/src/client/online_client/block_number_or_ref.rs rename to subxt/src/client/online_client/block_number_or_ref.rs diff --git a/new/src/client/online_client/blocks.rs b/subxt/src/client/online_client/blocks.rs similarity index 100% rename from new/src/client/online_client/blocks.rs rename to subxt/src/client/online_client/blocks.rs diff --git a/new/src/config.rs b/subxt/src/config.rs similarity index 97% rename from new/src/config.rs rename to subxt/src/config.rs index 527e509941..3cec090d5d 100644 --- a/new/src/config.rs +++ b/subxt/src/config.rs @@ -15,14 +15,14 @@ pub mod polkadot; pub mod substrate; pub mod transaction_extensions; +use crate::metadata::{ArcMetadata, Metadata}; use codec::{Decode, Encode}; use core::fmt::Debug; use scale_decode::DecodeAsType; use scale_encode::EncodeAsType; use scale_info_legacy::TypeRegistrySet; use serde::{Serialize, de::DeserializeOwned}; -use std::{fmt::Display, marker::PhantomData, sync::Arc}; -use subxt_metadata::Metadata; +use std::{fmt::Display, marker::PhantomData}; use subxt_rpcs::RpcConfig; pub use default_extrinsic_params::{DefaultExtrinsicParams, DefaultExtrinsicParamsBuilder}; @@ -84,14 +84,14 @@ pub trait Config: Clone + Debug + Sized + Send + Sync + 'static { /// The [`crate::client::OnlineClient`] will look this up on chain if it's 
not available here, and then /// call [`Config::set_metadata_for_spec_version`] to give the configuration the opportunity to cache it. /// The [`crate::client::OfflineClient`] will error if this is not available for the required spec version. - fn metadata_for_spec_version(&self, _spec_version: u32) -> Option> { + fn metadata_for_spec_version(&self, _spec_version: u32) -> Option { None } /// Set some metadata for a given spec version. the [`crate::client::OnlineClient`] will call this if it has /// to retrieve metadata from the chain, to give this the opportunity to cache it. The configuration can /// do nothing if it prefers. - fn set_metadata_for_spec_version(&self, _spec_version: u32, _metadata: Arc) {} + fn set_metadata_for_spec_version(&self, _spec_version: u32, _metadata: ArcMetadata) {} /// Return legacy types (ie types to use with Runtimes that return pre-V14 metadata) for a given spec version. /// If this returns `None`, [`subxt`] will return an error if type definitions are needed to access some older diff --git a/new/src/config/default_extrinsic_params.rs b/subxt/src/config/default_extrinsic_params.rs similarity index 100% rename from new/src/config/default_extrinsic_params.rs rename to subxt/src/config/default_extrinsic_params.rs diff --git a/new/src/config/extrinsic_params.rs b/subxt/src/config/extrinsic_params.rs similarity index 97% rename from new/src/config/extrinsic_params.rs rename to subxt/src/config/extrinsic_params.rs index 3a7f5b5980..e20cc1059a 100644 --- a/new/src/config/extrinsic_params.rs +++ b/subxt/src/config/extrinsic_params.rs @@ -7,13 +7,10 @@ //! [`crate::config::DefaultExtrinsicParams`] provides a general-purpose //! implementation of this that will work in many cases. 
-use crate::{ - config::{Config, HashFor}, - error::ExtrinsicParamsError, -}; +use crate::config::{Config, HashFor}; +use crate::error::ExtrinsicParamsError; +use crate::metadata::ArcMetadata; use core::any::Any; -use std::sync::Arc; -use subxt_metadata::Metadata; /// This provides access to some relevant client state in transaction extensions, /// and is just a combination of some of the available properties. @@ -26,7 +23,7 @@ pub struct ClientState { /// Transaction version. pub transaction_version: u32, /// Metadata. - pub metadata: Arc, + pub metadata: ArcMetadata, } /// This trait allows you to configure the "signed extra" and diff --git a/new/src/config/polkadot.rs b/subxt/src/config/polkadot.rs similarity index 87% rename from new/src/config/polkadot.rs rename to subxt/src/config/polkadot.rs index 3c5e5435e7..ae8ec2e8c5 100644 --- a/new/src/config/polkadot.rs +++ b/subxt/src/config/polkadot.rs @@ -7,9 +7,8 @@ use super::{Config, DefaultExtrinsicParams, DefaultExtrinsicParamsBuilder}; use crate::config::substrate::{SubstrateConfig, SubstrateConfigBuilder}; +use crate::metadata::ArcMetadata; use scale_info_legacy::TypeRegistrySet; -use std::sync::Arc; -use subxt_metadata::Metadata; pub use crate::config::substrate::{SpecVersionForRange, SubstrateHeader}; pub use crate::utils::{AccountId32, MultiAddress, MultiSignature}; @@ -18,6 +17,12 @@ pub use primitive_types::{H256, U256}; /// Construct a [`PolkadotConfig`] using this. pub struct PolkadotConfigBuilder(SubstrateConfigBuilder); +impl Default for PolkadotConfigBuilder { + fn default() -> Self { + Self::new() + } +} + impl PolkadotConfigBuilder { /// Create a new [`PolkadotConfigBuilder`]. pub fn new() -> Self { @@ -30,7 +35,7 @@ impl PolkadotConfigBuilder { /// Set the metadata to be used for decoding blocks at the given spec versions. 
pub fn set_metadata_for_spec_versions( mut self, - ranges: impl Iterator)>, + ranges: impl Iterator, ) -> Self { self = Self(self.0.set_metadata_for_spec_versions(ranges)); self @@ -56,6 +61,18 @@ impl PolkadotConfigBuilder { #[derive(Debug, Clone)] pub struct PolkadotConfig(SubstrateConfig); +impl PolkadotConfig { + /// Create a new, default, [`PolkadotConfig`]. + pub fn new() -> Self { + Self::builder().build() + } + + /// Build a new [`PolkadotConfig`]. + pub fn builder() -> PolkadotConfigBuilder { + PolkadotConfigBuilder(SubstrateConfig::builder()) + } +} + impl Config for PolkadotConfig { type AccountId = ::AccountId; type Signature = ::Signature; @@ -87,11 +104,11 @@ impl Config for PolkadotConfig { .spec_and_transaction_version_for_block_number(block_number) } - fn metadata_for_spec_version(&self, spec_version: u32) -> Option> { + fn metadata_for_spec_version(&self, spec_version: u32) -> Option { self.0.metadata_for_spec_version(spec_version) } - fn set_metadata_for_spec_version(&self, spec_version: u32, metadata: Arc) { + fn set_metadata_for_spec_version(&self, spec_version: u32, metadata: ArcMetadata) { self.0.set_metadata_for_spec_version(spec_version, metadata) } } diff --git a/new/src/config/substrate.rs b/subxt/src/config/substrate.rs similarity index 96% rename from new/src/config/substrate.rs rename to subxt/src/config/substrate.rs index 360f4e47e7..03b7dcb217 100644 --- a/new/src/config/substrate.rs +++ b/subxt/src/config/substrate.rs @@ -6,6 +6,7 @@ use super::{Config, DefaultExtrinsicParams, DefaultExtrinsicParamsBuilder, Hasher, Header}; use crate::config::Hash; +use crate::metadata::{ArcMetadata, Metadata}; use crate::utils::RangeMap; pub use crate::utils::{AccountId32, MultiAddress, MultiSignature}; use codec::{Decode, Encode}; @@ -15,14 +16,13 @@ use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::sync::Arc; use std::sync::Mutex; -use subxt_metadata::Metadata; /// Construct a [`SubstrateConfig`] using this. 
pub struct SubstrateConfigBuilder { legacy_types: Option, spec_and_transaction_version_for_block_number: RangeMap, genesis_hash: Option, - metadata_for_spec_version: Mutex>>, + metadata_for_spec_version: Mutex>, use_old_v9_hashers_before_spec_version: u32, } @@ -60,7 +60,7 @@ impl SubstrateConfigBuilder { /// Set the metadata to be used for decoding blocks at the given spec versions. pub fn set_metadata_for_spec_versions( self, - ranges: impl Iterator)>, + ranges: impl Iterator, ) -> Self { let mut map = self.metadata_for_spec_version.lock().unwrap(); for (spec_version, metadata) in ranges { @@ -132,10 +132,18 @@ pub struct SubstrateConfig { struct SubstrateConfigInner { legacy_types: Option, spec_and_transaction_version_for_block_number: RangeMap, - metadata_for_spec_version: Mutex>>, + metadata_for_spec_version: Mutex>, } impl SubstrateConfig { + /// Create a new, default, [`SubstrateConfig`]. This does not + /// support working with historic (pre-V14) types. If you want this, + /// then use [`SubstrateConfig::builder()`] and then provide legacy + /// types via [`SubstrateConfigBuilder::set_legacy_types()`]. + pub fn new() -> Self { + Self::builder().build() + } + /// Build a new [`SubstrateConfig`]. 
pub fn builder() -> SubstrateConfigBuilder { SubstrateConfigBuilder::new() @@ -168,7 +176,7 @@ impl Config for SubstrateConfig { .copied() } - fn metadata_for_spec_version(&self, spec_version: u32) -> Option> { + fn metadata_for_spec_version(&self, spec_version: u32) -> Option { self.inner .metadata_for_spec_version .lock() @@ -177,7 +185,7 @@ impl Config for SubstrateConfig { .cloned() } - fn set_metadata_for_spec_version(&self, spec_version: u32, metadata: Arc) { + fn set_metadata_for_spec_version(&self, spec_version: u32, metadata: ArcMetadata) { self.inner .metadata_for_spec_version .lock() diff --git a/new/src/config/transaction_extensions.rs b/subxt/src/config/transaction_extensions.rs similarity index 100% rename from new/src/config/transaction_extensions.rs rename to subxt/src/config/transaction_extensions.rs diff --git a/new/src/constants.rs b/subxt/src/constants.rs similarity index 100% rename from new/src/constants.rs rename to subxt/src/constants.rs diff --git a/new/src/constants/address.rs b/subxt/src/constants/address.rs similarity index 100% rename from new/src/constants/address.rs rename to subxt/src/constants/address.rs diff --git a/subxt/src/constants/constants_client.rs b/subxt/src/constants/constants_client.rs deleted file mode 100644 index 26baa4ab49..0000000000 --- a/subxt/src/constants/constants_client.rs +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use crate::{Config, client::OfflineClientT, error::ConstantError}; -use derive_where::derive_where; -use subxt_core::constants::address::Address; - -/// A client for accessing constants. -#[derive_where(Clone; Client)] -pub struct ConstantsClient { - client: Client, - _marker: std::marker::PhantomData, -} - -impl ConstantsClient { - /// Create a new [`ConstantsClient`]. 
- pub fn new(client: Client) -> Self { - Self { - client, - _marker: std::marker::PhantomData, - } - } -} - -impl> ConstantsClient { - /// Run the validation logic against some constant address you'd like to access. Returns `Ok(())` - /// if the address is valid (or if it's not possible to check since the address has no validation hash). - /// Return an error if the address was not valid or something went wrong trying to validate it (ie - /// the pallet or constant in question do not exist at all). - pub fn validate(&self, address: Addr) -> Result<(), ConstantError> { - let metadata = self.client.metadata(); - subxt_core::constants::validate(address, &metadata) - } - - /// Access the constant at the address given, returning the type defined by this address. - /// This is probably used with addresses given from static codegen, although you can manually - /// construct your own, too. - pub fn at(&self, address: Addr) -> Result { - let metadata = self.client.metadata(); - subxt_core::constants::get(address, &metadata) - } - - /// Access the bytes of a constant by the address it is registered under. - pub fn bytes_at(&self, address: Addr) -> Result, ConstantError> { - let metadata = self.client.metadata(); - subxt_core::constants::get_bytes(address, &metadata) - } -} diff --git a/subxt/src/constants/mod.rs b/subxt/src/constants/mod.rs deleted file mode 100644 index b9b3e9380a..0000000000 --- a/subxt/src/constants/mod.rs +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! Types associated with accessing constants. 
- -mod constants_client; - -pub use constants_client::ConstantsClient; -pub use subxt_core::constants::address::{Address, DynamicAddress, StaticAddress, dynamic}; diff --git a/new/src/custom_values.rs b/subxt/src/custom_values.rs similarity index 100% rename from new/src/custom_values.rs rename to subxt/src/custom_values.rs diff --git a/new/src/custom_values/address.rs b/subxt/src/custom_values/address.rs similarity index 100% rename from new/src/custom_values/address.rs rename to subxt/src/custom_values/address.rs diff --git a/subxt/src/custom_values/custom_values_client.rs b/subxt/src/custom_values/custom_values_client.rs deleted file mode 100644 index 0414b0a2d1..0000000000 --- a/subxt/src/custom_values/custom_values_client.rs +++ /dev/null @@ -1,134 +0,0 @@ -use crate::client::OfflineClientT; -use crate::{Config, error::CustomValueError}; -use derive_where::derive_where; - -use subxt_core::custom_values::address::{Address, Maybe}; - -/// A client for accessing custom values stored in the metadata. -#[derive_where(Clone; Client)] -pub struct CustomValuesClient { - client: Client, - _marker: std::marker::PhantomData, -} - -impl CustomValuesClient { - /// Create a new [`CustomValuesClient`]. - pub fn new(client: Client) -> Self { - Self { - client, - _marker: std::marker::PhantomData, - } - } -} - -impl> CustomValuesClient { - /// Access a custom value by the address it is registered under. This can be just a [str] to get back a dynamic value, - /// or a static address from the generated static interface to get a value of a static type returned. - pub fn at>( - &self, - address: Addr, - ) -> Result { - subxt_core::custom_values::get(address, &self.client.metadata()) - } - - /// Access the bytes of a custom value by the address it is registered under. 
- pub fn bytes_at(&self, address: Addr) -> Result, CustomValueError> { - subxt_core::custom_values::get_bytes(address, &self.client.metadata()) - } - - /// Run the validation logic against some custom value address you'd like to access. Returns `Ok(())` - /// if the address is valid (or if it's not possible to check since the address has no validation hash). - /// Returns an error if the address was not valid (wrong name, type or raw bytes) - pub fn validate(&self, address: Addr) -> Result<(), CustomValueError> { - subxt_core::custom_values::validate(address, &self.client.metadata()) - } -} - -#[cfg(test)] -mod tests { - use crate::custom_values::{self, CustomValuesClient}; - use crate::{Metadata, OfflineClient, SubstrateConfig}; - use codec::Encode; - use scale_decode::DecodeAsType; - use scale_info::TypeInfo; - use scale_info::form::PortableForm; - use std::collections::BTreeMap; - use subxt_core::client::RuntimeVersion; - - #[derive(Debug, Clone, PartialEq, Eq, Encode, TypeInfo, DecodeAsType)] - pub struct Person { - age: u16, - name: String, - } - - fn mock_metadata() -> Metadata { - let person_ty = scale_info::MetaType::new::(); - let unit = scale_info::MetaType::new::<()>(); - let mut types = scale_info::Registry::new(); - let person_ty_id = types.register_type(&person_ty); - let unit_id = types.register_type(&unit); - let types: scale_info::PortableRegistry = types.into(); - - let person = Person { - age: 42, - name: "Neo".into(), - }; - - let person_value_metadata: frame_metadata::v15::CustomValueMetadata = - frame_metadata::v15::CustomValueMetadata { - ty: person_ty_id, - value: person.encode(), - }; - - let frame_metadata = frame_metadata::v15::RuntimeMetadataV15 { - types, - pallets: vec![], - extrinsic: frame_metadata::v15::ExtrinsicMetadata { - version: 0, - address_ty: unit_id, - call_ty: unit_id, - signature_ty: unit_id, - extra_ty: unit_id, - signed_extensions: vec![], - }, - ty: unit_id, - apis: vec![], - outer_enums: 
frame_metadata::v15::OuterEnums { - call_enum_ty: unit_id, - event_enum_ty: unit_id, - error_enum_ty: unit_id, - }, - custom: frame_metadata::v15::CustomMetadata { - map: BTreeMap::from_iter([("Person".to_string(), person_value_metadata)]), - }, - }; - - let metadata: subxt_metadata::Metadata = frame_metadata.try_into().unwrap(); - metadata - } - - #[test] - fn test_decoding() { - let client = OfflineClient::::new( - Default::default(), - RuntimeVersion { - spec_version: 0, - transaction_version: 0, - }, - mock_metadata(), - ); - - let custom_value_client = CustomValuesClient::new(client); - assert!(custom_value_client.at("No one").is_err()); - - let person_addr = custom_values::dynamic::("Person"); - let person = custom_value_client.at(&person_addr).unwrap(); - assert_eq!( - person, - Person { - age: 42, - name: "Neo".into() - } - ) - } -} diff --git a/subxt/src/custom_values/mod.rs b/subxt/src/custom_values/mod.rs deleted file mode 100644 index e1f5d3a0d0..0000000000 --- a/subxt/src/custom_values/mod.rs +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! Types associated with accessing custom types - -mod custom_values_client; - -pub use custom_values_client::CustomValuesClient; -pub use subxt_core::custom_values::address::{Address, DynamicAddress, StaticAddress, dynamic}; diff --git a/core/src/dynamic.rs b/subxt/src/dynamic.rs similarity index 70% rename from core/src/dynamic.rs rename to subxt/src/dynamic.rs index 27819620f4..dc579c734f 100644 --- a/core/src/dynamic.rs +++ b/subxt/src/dynamic.rs @@ -2,13 +2,12 @@ // This file is dual-licensed as Apache-2.0 or GPL-3.0. // see LICENSE for license details. -//! This module provides the entry points to create dynamic -//! transactions, storage and constant lookups. +//! This module ex-exports various helpers for constructing dynamic payloads/queries/addresses. 
-pub use scale_value::{At, Value}; +pub use scale_value::{At, Value, value}; // Submit dynamic transactions. -pub use crate::tx::payload::dynamic as tx; +pub use crate::transactions::payload::dynamic as transaction; // Lookup constants dynamically. pub use crate::constants::address::dynamic as constant; @@ -17,7 +16,7 @@ pub use crate::constants::address::dynamic as constant; pub use crate::storage::address::dynamic as storage; // Execute runtime API function call dynamically. -pub use crate::runtime_api::payload::dynamic as runtime_api_call; +pub use crate::runtime_apis::payload::dynamic as runtime_api_call; // Execute View Function API function call dynamically. pub use crate::view_functions::payload::dynamic as view_function_call; diff --git a/new/src/error.rs b/subxt/src/error.rs similarity index 99% rename from new/src/error.rs rename to subxt/src/error.rs index 0618ea12ba..ecfcc8263a 100644 --- a/new/src/error.rs +++ b/subxt/src/error.rs @@ -803,6 +803,8 @@ pub enum TransactionEventsError { block_hash: Hex, error: EventsError, }, + #[error("Could not instantiate a client at the required block to fetch events: {0}")] + CannotInstantiateClientAtBlock(OnlineClientAtBlockError), #[error("Could not fetch events for the submitted transaction: {error}")] CannotFetchEventsForTransaction { block_hash: Hex, diff --git a/subxt/src/error/dispatch_error.rs b/subxt/src/error/dispatch_error.rs index 4b27e63dfd..18e7ee412a 100644 --- a/subxt/src/error/dispatch_error.rs +++ b/subxt/src/error/dispatch_error.rs @@ -6,7 +6,7 @@ //! something fails in trying to submit/execute a transaction. 
use super::{DispatchErrorDecodeError, ModuleErrorDecodeError, ModuleErrorDetailsError}; -use crate::metadata::Metadata; +use crate::metadata::ArcMetadata; use core::fmt::Debug; use scale_decode::{DecodeAsType, TypeResolver, visitor::DecodeAsTypeResult}; use std::{borrow::Cow, marker::PhantomData}; @@ -133,7 +133,7 @@ pub enum TransactionalError { #[derive(Clone, thiserror::Error)] #[non_exhaustive] pub struct ModuleError { - metadata: Metadata, + metadata: ArcMetadata, /// Bytes representation: /// - `bytes[0]`: pallet index /// - `bytes[1]`: error index @@ -242,7 +242,7 @@ impl DispatchError { #[doc(hidden)] pub fn decode_from<'a>( bytes: impl Into>, - metadata: Metadata, + metadata: ArcMetadata, ) -> Result { let bytes = bytes.into(); let dispatch_error_ty_id = metadata diff --git a/subxt/src/error/mod.rs b/subxt/src/error/mod.rs deleted file mode 100644 index 52349f42da..0000000000 --- a/subxt/src/error/mod.rs +++ /dev/null @@ -1,702 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! Types representing the errors that can be returned. - -mod dispatch_error; -mod hex; - -crate::macros::cfg_unstable_light_client! { - pub use subxt_lightclient::LightClientError; -} - -// Re-export dispatch error types: -pub use dispatch_error::{ - ArithmeticError, DispatchError, ModuleError, TokenError, TransactionalError, -}; - -// Re-expose the errors we use from other crates here: -pub use crate::Metadata; -pub use hex::Hex; -pub use scale_decode::Error as DecodeError; -pub use scale_encode::Error as EncodeError; -pub use subxt_metadata::TryFromError as MetadataTryFromError; - -// Re-export core error types we're just reusing. 
-pub use subxt_core::error::{ - ConstantError, - CustomValueError, - EventsError as CoreEventsError, - // These errors are exposed as-is: - ExtrinsicDecodeErrorAt, - // These errors are wrapped: - ExtrinsicError as CoreExtrinsicError, - RuntimeApiError as CoreRuntimeApiError, - StorageError as CoreStorageError, - StorageKeyError, - StorageValueError, - ViewFunctionError as CoreViewFunctionError, -}; - -/// A global error type. Any of the errors exposed here can convert into this -/// error via `.into()`, but this error isn't itself exposed from anything. -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -#[allow(missing_docs)] -pub enum Error { - #[error(transparent)] - ExtrinsicDecodeErrorAt(#[from] ExtrinsicDecodeErrorAt), - #[error(transparent)] - ConstantError(#[from] ConstantError), - #[error(transparent)] - CustomValueError(#[from] CustomValueError), - #[error(transparent)] - StorageKeyError(#[from] StorageKeyError), - #[error(transparent)] - StorageValueError(#[from] StorageValueError), - #[error(transparent)] - BackendError(#[from] BackendError), - #[error(transparent)] - BlockError(#[from] BlockError), - #[error(transparent)] - AccountNonceError(#[from] AccountNonceError), - #[error(transparent)] - OnlineClientError(#[from] OnlineClientError), - #[error(transparent)] - RuntimeUpdaterError(#[from] RuntimeUpdaterError), - #[error(transparent)] - RuntimeUpdateeApplyError(#[from] RuntimeUpdateeApplyError), - #[error(transparent)] - RuntimeApiError(#[from] RuntimeApiError), - #[error(transparent)] - EventsError(#[from] EventsError), - #[error(transparent)] - ExtrinsicError(#[from] ExtrinsicError), - #[error(transparent)] - ViewFunctionError(#[from] ViewFunctionError), - #[error(transparent)] - TransactionProgressError(#[from] TransactionProgressError), - #[error(transparent)] - TransactionStatusError(#[from] TransactionStatusError), - #[error(transparent)] - TransactionEventsError(#[from] TransactionEventsError), - #[error(transparent)] - 
TransactionFinalizedSuccessError(#[from] TransactionFinalizedSuccessError), - #[error(transparent)] - ModuleErrorDetailsError(#[from] ModuleErrorDetailsError), - #[error(transparent)] - ModuleErrorDecodeError(#[from] ModuleErrorDecodeError), - #[error(transparent)] - DispatchErrorDecodeError(#[from] DispatchErrorDecodeError), - #[error(transparent)] - StorageError(#[from] StorageError), - // Dev note: Subxt doesn't directly return Raw* errors. These exist so that when - // users use common crates (like parity-scale-codec and subxt-rpcs), errors returned - // there can be handled automatically using ? when the expected error is subxt::Error. - #[error("Other RPC client error: {0}")] - OtherRpcClientError(#[from] subxt_rpcs::Error), - #[error("Other codec error: {0}")] - OtherCodecError(#[from] codec::Error), - #[cfg(feature = "unstable-light-client")] - #[error("Other light client error: {0}")] - OtherLightClientError(#[from] subxt_lightclient::LightClientError), - #[cfg(feature = "unstable-light-client")] - #[error("Other light client RPC error: {0}")] - OtherLightClientRpcError(#[from] subxt_lightclient::LightClientRpcError), - // Dev note: Nothing in subxt should ever emit this error. It can instead be used - // to easily map other errors into a subxt::Error for convenience. Some From impls - // make this automatic for common "other" error types. - #[error("Other error: {0}")] - Other(Box), -} - -impl From for Error { - fn from(value: std::convert::Infallible) -> Self { - match value {} - } -} - -impl Error { - /// Create a generic error. This is a quick workaround when you are using - /// [`Error`] and have a non-Subxt error to return. - pub fn other(error: E) -> Error { - Error::Other(Box::new(error)) - } - - /// Create a generic error from a string. This is a quick workaround when you are using - /// [`Error`] and have a non-Subxt error to return. 
- pub fn other_str(error: impl Into) -> Error { - #[derive(thiserror::Error, Debug, Clone)] - #[error("{0}")] - struct StrError(String); - Error::Other(Box::new(StrError(error.into()))) - } - - /// Checks whether the error was caused by a RPC re-connection. - pub fn is_disconnected_will_reconnect(&self) -> bool { - matches!( - self.backend_error(), - Some(BackendError::Rpc(RpcError::ClientError( - subxt_rpcs::Error::DisconnectedWillReconnect(_) - ))) - ) - } - - /// Checks whether the error was caused by a RPC request being rejected. - pub fn is_rpc_limit_reached(&self) -> bool { - matches!( - self.backend_error(), - Some(BackendError::Rpc(RpcError::LimitReached)) - ) - } - - fn backend_error(&self) -> Option<&BackendError> { - match self { - Error::BlockError(e) => e.backend_error(), - Error::AccountNonceError(e) => e.backend_error(), - Error::OnlineClientError(e) => e.backend_error(), - Error::RuntimeUpdaterError(e) => e.backend_error(), - Error::RuntimeApiError(e) => e.backend_error(), - Error::EventsError(e) => e.backend_error(), - Error::ExtrinsicError(e) => e.backend_error(), - Error::ViewFunctionError(e) => e.backend_error(), - Error::TransactionProgressError(e) => e.backend_error(), - Error::TransactionEventsError(e) => e.backend_error(), - Error::TransactionFinalizedSuccessError(e) => e.backend_error(), - Error::StorageError(e) => e.backend_error(), - // Any errors that **don't** return a BackendError anywhere will return None: - _ => None, - } - } -} - -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -#[allow(missing_docs)] -pub enum BackendError { - #[error("Backend error: RPC error: {0}")] - Rpc(#[from] RpcError), - #[error("Backend error: Could not find metadata version {0}")] - MetadataVersionNotFound(u32), - #[error("Backend error: Could not codec::Decode Runtime API response: {0}")] - CouldNotScaleDecodeRuntimeResponse(codec::Error), - #[error("Backend error: Could not codec::Decode metadata bytes into subxt::Metadata: {0}")] - 
CouldNotDecodeMetadata(codec::Error), - // This is for errors in `Backend` implementations which aren't any of the "pre-defined" set above: - #[error("Custom backend error: {0}")] - Other(String), -} - -impl BackendError { - /// Checks whether the error was caused by a RPC re-connection. - pub fn is_disconnected_will_reconnect(&self) -> bool { - matches!( - self, - BackendError::Rpc(RpcError::ClientError( - subxt_rpcs::Error::DisconnectedWillReconnect(_) - )) - ) - } - - /// Checks whether the error was caused by a RPC request being rejected. - pub fn is_rpc_limit_reached(&self) -> bool { - matches!(self, BackendError::Rpc(RpcError::LimitReached)) - } -} - -impl From for BackendError { - fn from(value: subxt_rpcs::Error) -> Self { - BackendError::Rpc(RpcError::ClientError(value)) - } -} - -/// An RPC error. Since we are generic over the RPC client that is used, -/// the error is boxed and could be casted. -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -pub enum RpcError { - /// Error related to the RPC client. - #[error("RPC error: {0}")] - ClientError(#[from] subxt_rpcs::Error), - /// This error signals that we got back a [`subxt_rpcs::methods::chain_head::MethodResponse::LimitReached`], - /// which is not technically an RPC error but is treated as an error in our own APIs. - #[error("RPC error: limit reached")] - LimitReached, - /// The RPC subscription was dropped. 
- #[error("RPC error: subscription dropped.")] - SubscriptionDropped, -} - -/// Block error -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -#[allow(missing_docs)] -pub enum BlockError { - #[error( - "Could not find the block body with hash {block_hash} (perhaps it was on a non-finalized fork?)" - )] - BlockNotFound { block_hash: Hex }, - #[error("Could not download the block header with hash {block_hash}: {reason}")] - CouldNotGetBlockHeader { - block_hash: Hex, - reason: BackendError, - }, - #[error("Could not download the latest block header: {0}")] - CouldNotGetLatestBlock(BackendError), - #[error("Could not subscribe to all blocks: {0}")] - CouldNotSubscribeToAllBlocks(BackendError), - #[error("Could not subscribe to best blocks: {0}")] - CouldNotSubscribeToBestBlocks(BackendError), - #[error("Could not subscribe to finalized blocks: {0}")] - CouldNotSubscribeToFinalizedBlocks(BackendError), - #[error("Error getting account nonce at block {block_hash}")] - AccountNonceError { - block_hash: Hex, - account_id: Hex, - reason: AccountNonceError, - }, -} - -impl BlockError { - fn backend_error(&self) -> Option<&BackendError> { - match self { - BlockError::CouldNotGetBlockHeader { reason: e, .. 
} - | BlockError::CouldNotGetLatestBlock(e) - | BlockError::CouldNotSubscribeToAllBlocks(e) - | BlockError::CouldNotSubscribeToBestBlocks(e) - | BlockError::CouldNotSubscribeToFinalizedBlocks(e) => Some(e), - _ => None, - } - } -} - -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -#[allow(missing_docs)] -pub enum AccountNonceError { - #[error("Could not retrieve account nonce: {0}")] - CouldNotRetrieve(#[from] BackendError), - #[error("Could not decode account nonce: {0}")] - CouldNotDecode(#[from] codec::Error), - #[error("Wrong number of account nonce bytes returned: {0} (expected 2, 4 or 8)")] - WrongNumberOfBytes(usize), -} - -impl AccountNonceError { - fn backend_error(&self) -> Option<&BackendError> { - match self { - AccountNonceError::CouldNotRetrieve(e) => Some(e), - _ => None, - } - } -} - -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -#[allow(missing_docs)] -pub enum OnlineClientError { - #[error("Cannot construct OnlineClient: {0}")] - RpcError(#[from] subxt_rpcs::Error), - #[error( - "Cannot construct OnlineClient: Cannot fetch latest finalized block to obtain init details from: {0}" - )] - CannotGetLatestFinalizedBlock(BackendError), - #[error("Cannot construct OnlineClient: Cannot fetch genesis hash: {0}")] - CannotGetGenesisHash(BackendError), - #[error("Cannot construct OnlineClient: Cannot fetch current runtime version: {0}")] - CannotGetCurrentRuntimeVersion(BackendError), - #[error("Cannot construct OnlineClient: Cannot fetch metadata: {0}")] - CannotFetchMetadata(BackendError), -} - -impl OnlineClientError { - fn backend_error(&self) -> Option<&BackendError> { - match self { - OnlineClientError::CannotGetLatestFinalizedBlock(e) - | OnlineClientError::CannotGetGenesisHash(e) - | OnlineClientError::CannotGetCurrentRuntimeVersion(e) - | OnlineClientError::CannotFetchMetadata(e) => Some(e), - _ => None, - } - } -} - -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -#[allow(missing_docs)] -pub enum RuntimeUpdaterError { - 
#[error("Error subscribing to runtime updates: The update stream ended unexpectedly")] - UnexpectedEndOfUpdateStream, - #[error("Error subscribing to runtime updates: The finalized block stream ended unexpectedly")] - UnexpectedEndOfBlockStream, - #[error("Error subscribing to runtime updates: Can't stream runtime version: {0}")] - CannotStreamRuntimeVersion(BackendError), - #[error("Error subscribing to runtime updates: Can't get next runtime version in stream: {0}")] - CannotGetNextRuntimeVersion(BackendError), - #[error("Error subscribing to runtime updates: Cannot stream finalized blocks: {0}")] - CannotStreamFinalizedBlocks(BackendError), - #[error("Error subscribing to runtime updates: Cannot get next finalized block in stream: {0}")] - CannotGetNextFinalizedBlock(BackendError), - #[error("Cannot fetch new metadata for runtime update: {0}")] - CannotFetchNewMetadata(BackendError), - #[error( - "Error subscribing to runtime updates: Cannot find the System.LastRuntimeUpgrade storage entry" - )] - CantFindSystemLastRuntimeUpgrade, - #[error("Error subscribing to runtime updates: Cannot fetch last runtime upgrade: {0}")] - CantFetchLastRuntimeUpgrade(StorageError), - #[error("Error subscribing to runtime updates: Cannot decode last runtime upgrade: {0}")] - CannotDecodeLastRuntimeUpgrade(StorageValueError), -} - -impl RuntimeUpdaterError { - fn backend_error(&self) -> Option<&BackendError> { - match self { - RuntimeUpdaterError::CannotStreamRuntimeVersion(e) - | RuntimeUpdaterError::CannotGetNextRuntimeVersion(e) - | RuntimeUpdaterError::CannotStreamFinalizedBlocks(e) - | RuntimeUpdaterError::CannotGetNextFinalizedBlock(e) - | RuntimeUpdaterError::CannotFetchNewMetadata(e) => Some(e), - _ => None, - } - } -} - -/// Error that can occur during upgrade. 
-#[non_exhaustive] -#[derive(Debug, thiserror::Error)] -#[allow(missing_docs)] -pub enum RuntimeUpdateeApplyError { - #[error("The proposed runtime update is the same as the current version")] - SameVersion, -} - -/// Error working with Runtime APIs -#[non_exhaustive] -#[derive(Debug, thiserror::Error)] -#[allow(missing_docs)] -pub enum RuntimeApiError { - #[error("Cannot access Runtime APIs at latest block: Cannot fetch latest finalized block: {0}")] - CannotGetLatestFinalizedBlock(BackendError), - #[error("{0}")] - OfflineError(#[from] CoreRuntimeApiError), - #[error("Cannot call the Runtime API: {0}")] - CannotCallApi(BackendError), -} - -impl RuntimeApiError { - fn backend_error(&self) -> Option<&BackendError> { - match self { - RuntimeApiError::CannotGetLatestFinalizedBlock(e) - | RuntimeApiError::CannotCallApi(e) => Some(e), - _ => None, - } - } -} - -/// Error working with events. -#[non_exhaustive] -#[derive(Debug, thiserror::Error)] -#[allow(missing_docs)] -pub enum EventsError { - #[error("{0}")] - OfflineError(#[from] CoreEventsError), - #[error("Cannot access events at latest block: Cannot fetch latest finalized block: {0}")] - CannotGetLatestFinalizedBlock(BackendError), - #[error("Cannot fetch event bytes: {0}")] - CannotFetchEventBytes(BackendError), -} - -impl EventsError { - fn backend_error(&self) -> Option<&BackendError> { - match self { - EventsError::CannotGetLatestFinalizedBlock(e) - | EventsError::CannotFetchEventBytes(e) => Some(e), - _ => None, - } - } -} - -/// Error working with extrinsics. 
-#[non_exhaustive] -#[derive(Debug, thiserror::Error)] -#[allow(missing_docs)] -pub enum ExtrinsicError { - #[error("{0}")] - OfflineError(#[from] CoreExtrinsicError), - #[error("Could not download block body to extract extrinsics from: {0}")] - CannotGetBlockBody(BackendError), - #[error("Block not found: {0}")] - BlockNotFound(Hex), - #[error("{0}")] - CouldNotDecodeExtrinsics(#[from] ExtrinsicDecodeErrorAt), - #[error( - "Extrinsic submission error: Cannot get latest finalized block to grab account nonce at: {0}" - )] - CannotGetLatestFinalizedBlock(BackendError), - #[error("Cannot find block header for block {block_hash}")] - CannotFindBlockHeader { block_hash: Hex }, - #[error("Error getting account nonce at block {block_hash}")] - AccountNonceError { - block_hash: Hex, - account_id: Hex, - reason: AccountNonceError, - }, - #[error("Cannot submit extrinsic: {0}")] - ErrorSubmittingTransaction(BackendError), - #[error("A transaction status error was returned while submitting the extrinsic: {0}")] - TransactionStatusError(TransactionStatusError), - #[error( - "The transaction status stream encountered an error while submitting the extrinsic: {0}" - )] - TransactionStatusStreamError(BackendError), - #[error( - "The transaction status stream unexpectedly ended, so we don't know the status of the submitted extrinsic" - )] - UnexpectedEndOfTransactionStatusStream, - #[error("Cannot get fee info from Runtime API: {0}")] - CannotGetFeeInfo(BackendError), - #[error("Cannot get validation info from Runtime API: {0}")] - CannotGetValidationInfo(BackendError), - #[error("Cannot decode ValidationResult bytes: {0}")] - CannotDecodeValidationResult(codec::Error), - #[error("ValidationResult bytes could not be decoded")] - UnexpectedValidationResultBytes(Vec), -} - -impl ExtrinsicError { - fn backend_error(&self) -> Option<&BackendError> { - match self { - ExtrinsicError::CannotGetBlockBody(e) - | ExtrinsicError::CannotGetLatestFinalizedBlock(e) - | 
ExtrinsicError::ErrorSubmittingTransaction(e) - | ExtrinsicError::TransactionStatusStreamError(e) - | ExtrinsicError::CannotGetFeeInfo(e) - | ExtrinsicError::CannotGetValidationInfo(e) => Some(e), - ExtrinsicError::AccountNonceError { reason, .. } => reason.backend_error(), - _ => None, - } - } -} - -/// Error working with View Functions. -#[non_exhaustive] -#[derive(Debug, thiserror::Error)] -#[allow(missing_docs)] -pub enum ViewFunctionError { - #[error("{0}")] - OfflineError(#[from] CoreViewFunctionError), - #[error( - "Cannot access View Functions at latest block: Cannot fetch latest finalized block: {0}" - )] - CannotGetLatestFinalizedBlock(BackendError), - #[error("Cannot call the View Function Runtime API: {0}")] - CannotCallApi(BackendError), -} - -impl ViewFunctionError { - fn backend_error(&self) -> Option<&BackendError> { - match self { - ViewFunctionError::CannotGetLatestFinalizedBlock(e) - | ViewFunctionError::CannotCallApi(e) => Some(e), - _ => None, - } - } -} - -/// Error during the transaction progress. -#[non_exhaustive] -#[derive(Debug, thiserror::Error)] -#[allow(missing_docs)] -pub enum TransactionProgressError { - #[error("Cannot get the next transaction progress update: {0}")] - CannotGetNextProgressUpdate(BackendError), - #[error("Error during transaction progress: {0}")] - TransactionStatusError(#[from] TransactionStatusError), - #[error( - "The transaction status stream unexpectedly ended, so we have no further transaction progress updates" - )] - UnexpectedEndOfTransactionStatusStream, -} - -impl TransactionProgressError { - fn backend_error(&self) -> Option<&BackendError> { - match self { - TransactionProgressError::CannotGetNextProgressUpdate(e) => Some(e), - TransactionProgressError::TransactionStatusError(_) => None, - TransactionProgressError::UnexpectedEndOfTransactionStatusStream => None, - } - } -} - -/// An error emitted as the result of a transaction progress update. 
-#[derive(Clone, Debug, Eq, thiserror::Error, PartialEq)] -#[non_exhaustive] -#[allow(missing_docs)] -pub enum TransactionStatusError { - /// An error happened on the node that the transaction was submitted to. - #[error("Error handling transaction: {0}")] - Error(String), - /// The transaction was deemed invalid. - #[error("The transaction is not valid: {0}")] - Invalid(String), - /// The transaction was dropped. - #[error("The transaction was dropped: {0}")] - Dropped(String), -} - -/// Error fetching events for a just-submitted transaction -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -#[allow(missing_docs)] -pub enum TransactionEventsError { - #[error( - "The block containing the submitted transaction ({block_hash}) could not be downloaded: {error}" - )] - CannotFetchBlockBody { - block_hash: Hex, - error: BackendError, - }, - #[error( - "Cannot find the the submitted transaction (hash: {transaction_hash}) in the block (hash: {block_hash}) it is supposed to be in." - )] - CannotFindTransactionInBlock { - block_hash: Hex, - transaction_hash: Hex, - }, - #[error("The block containing the submitted transaction ({block_hash}) could not be found")] - BlockNotFound { block_hash: Hex }, - #[error( - "Could not decode event at index {event_index} for the submitted transaction at block {block_hash}: {error}" - )] - CannotDecodeEventInBlock { - event_index: usize, - block_hash: Hex, - error: EventsError, - }, - #[error("Could not fetch events for the submitted transaction: {error}")] - CannotFetchEventsForTransaction { - block_hash: Hex, - transaction_hash: Hex, - error: EventsError, - }, - #[error("The transaction led to a DispatchError, but we failed to decode it: {error}")] - CannotDecodeDispatchError { - error: DispatchErrorDecodeError, - bytes: Vec, - }, - #[error("The transaction failed with the following dispatch error: {0}")] - ExtrinsicFailed(#[from] DispatchError), -} - -impl TransactionEventsError { - fn backend_error(&self) -> Option<&BackendError> { 
- match self { - TransactionEventsError::CannotFetchBlockBody { error, .. } => Some(error), - TransactionEventsError::CannotDecodeEventInBlock { error, .. } - | TransactionEventsError::CannotFetchEventsForTransaction { error, .. } => { - error.backend_error() - } - _ => None, - } - } -} - -/// Error waiting for the transaction to be finalized and successful. -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -#[allow(missing_docs, clippy::large_enum_variant)] -pub enum TransactionFinalizedSuccessError { - #[error("Could not finalize the transaction: {0}")] - FinalizationError(#[from] TransactionProgressError), - #[error("The transaction did not succeed: {0}")] - SuccessError(#[from] TransactionEventsError), -} - -impl TransactionFinalizedSuccessError { - fn backend_error(&self) -> Option<&BackendError> { - match self { - TransactionFinalizedSuccessError::FinalizationError(e) => e.backend_error(), - TransactionFinalizedSuccessError::SuccessError(e) => e.backend_error(), - } - } -} - -/// Error decoding the [`DispatchError`] -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -#[allow(missing_docs)] -pub enum ModuleErrorDetailsError { - #[error( - "Could not get details for the DispatchError: could not find pallet index {pallet_index}" - )] - PalletNotFound { pallet_index: u8 }, - #[error( - "Could not get details for the DispatchError: could not find error index {error_index} in pallet {pallet_name}" - )] - ErrorVariantNotFound { - pallet_name: String, - error_index: u8, - }, -} - -/// Error decoding the [`ModuleError`] -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -#[allow(missing_docs)] -#[error("Could not decode the DispatchError::Module payload into the given type: {0}")] -pub struct ModuleErrorDecodeError(scale_decode::Error); - -/// Error decoding the [`DispatchError`] -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -#[allow(missing_docs)] -pub enum DispatchErrorDecodeError { - #[error( - "Could not decode the DispatchError: could not 
find the corresponding type ID in the metadata" - )] - DispatchErrorTypeIdNotFound, - #[error("Could not decode the DispatchError: {0}")] - CouldNotDecodeDispatchError(scale_decode::Error), - #[error("Could not decode the DispatchError::Module variant")] - CouldNotDecodeModuleError { - /// The bytes corresponding to the Module variant we were unable to decode: - bytes: Vec, - }, -} - -/// Error working with storage. -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -#[allow(missing_docs)] -pub enum StorageError { - #[error("{0}")] - Offline(#[from] CoreStorageError), - #[error("Cannot access storage at latest block: Cannot fetch latest finalized block: {0}")] - CannotGetLatestFinalizedBlock(BackendError), - #[error( - "No storage value found at the given address, and no default value to fall back to using." - )] - NoValueFound, - #[error("Cannot fetch the storage value: {0}")] - CannotFetchValue(BackendError), - #[error("Cannot iterate storage values: {0}")] - CannotIterateValues(BackendError), - #[error("Encountered an error iterating over storage values: {0}")] - StreamFailure(BackendError), - #[error("Cannot decode the storage version for a given entry: {0}")] - CannotDecodeStorageVersion(codec::Error), -} - -impl StorageError { - fn backend_error(&self) -> Option<&BackendError> { - match self { - StorageError::CannotGetLatestFinalizedBlock(e) - | StorageError::CannotFetchValue(e) - | StorageError::CannotIterateValues(e) - | StorageError::StreamFailure(e) => Some(e), - _ => None, - } - } -} diff --git a/new/src/events.rs b/subxt/src/events.rs similarity index 92% rename from new/src/events.rs rename to subxt/src/events.rs index 83dc7b4f38..8b944f2cc2 100644 --- a/new/src/events.rs +++ b/subxt/src/events.rs @@ -4,11 +4,11 @@ use crate::backend::BackendExt; use crate::client::{OfflineClientAtBlockT, OnlineClientAtBlockT}; use crate::config::{Config, HashFor}; use crate::error::EventsError; +use crate::{ArcMetadata, Metadata}; use codec::{Compact, Decode, 
Encode}; use scale_decode::{DecodeAsFields, DecodeAsType}; use std::marker::PhantomData; use std::sync::Arc; -use subxt_metadata::Metadata; pub use decode_as_event::DecodeAsEvent; @@ -32,7 +32,7 @@ impl<'atblock, T: Config, Client: OfflineClientAtBlockT> EventsClient<'atbloc /// /// No attempt to validate the provided bytes is made here; if invalid bytes are /// provided then attempting to iterate and decode them will fail. - pub fn from_bytes(&self, event_bytes: Vec) -> Events<'atblock, T> { + pub fn from_bytes(&self, event_bytes: Vec) -> Events { // event_bytes is a SCALE encoded vector of events. So, pluck the // compact encoded length from the front, leaving the remaining bytes // for our iterating to decode. @@ -46,7 +46,7 @@ impl<'atblock, T: Config, Client: OfflineClientAtBlockT> EventsClient<'atbloc let start_idx = event_bytes.len() - cursor.len(); Events { - metadata: self.client.metadata_ref(), + metadata: self.client.metadata(), event_bytes: event_bytes.into(), start_idx, num_events, @@ -57,7 +57,7 @@ impl<'atblock, T: Config, Client: OfflineClientAtBlockT> EventsClient<'atbloc impl<'atblock, T: Config, Client: OnlineClientAtBlockT> EventsClient<'atblock, T, Client> { /// Fetch the events at this block. - pub async fn fetch(&self) -> Result, EventsError> { + pub async fn fetch(&self) -> Result, EventsError> { let client = self.client; // Fetch the bytes. Ensure things work if we get 0 bytes back. @@ -74,9 +74,14 @@ impl<'atblock, T: Config, Client: OnlineClientAtBlockT> EventsClient<'atblock } /// The events at some block. +// Dev note [jsdw]: +// It would be nice if this borrowed &'atblock Metadata, to be +// consistent with many other things and allow longer lifetimes +// on a couple of bits, but we need to construct this from transaction +// things and can't provide lifetimes from there. 
#[derive(Debug)] -pub struct Events<'atblock, T> { - metadata: &'atblock Metadata, +pub struct Events { + metadata: ArcMetadata, // Note; raw event bytes are prefixed with a Compact containing // the number of events to be decoded. The start_idx reflects that, so // that we can skip over those bytes when decoding them @@ -86,7 +91,7 @@ pub struct Events<'atblock, T> { marker: core::marker::PhantomData, } -impl<'atblock, T: Config> Events<'atblock, T> { +impl Events { /// The number of events. pub fn len(&self) -> u32 { self.num_events @@ -108,9 +113,7 @@ impl<'atblock, T: Config> Events<'atblock, T> { /// details. If an error occurs, all subsequent iterations return `None`. // Dev note: The returned iterator is 'static + Send so that we can box it up and make // use of it with our `FilterEvents` stuff. - pub fn iter( - &'_ self, - ) -> impl Iterator, EventsError>> + Send + Sync { + pub fn iter(&'_ self) -> impl Iterator, EventsError>> + Send + Sync { // The event bytes ignoring the compact encoded length on the front: let event_bytes = self.event_bytes.clone(); let metadata = &*self.metadata; @@ -153,6 +156,12 @@ impl<'atblock, T: Config> Events<'atblock, T> { .filter_map(|e| e.decode_fields_as::()) } + /// Find the first event matching the given type, returning `None` if it doesn't exist, + /// and the result of decoding it if it does. + pub fn find_first(&self) -> Option> { + self.find::().next() + } + /// Find an event matching the given type, returning true if it exists. This function does _not_ /// try to actually decode the event bytes into the given type. pub fn has(&self) -> bool { @@ -173,10 +182,10 @@ pub enum Phase { /// The event details. 
#[derive(Debug, Clone)] -pub struct Event<'atblock, T: Config> { - pallet_name: &'atblock str, - event_name: &'atblock str, - metadata: &'atblock Metadata, +pub struct Event<'events, T: Config> { + pallet_name: &'events str, + event_name: &'events str, + metadata: &'events Metadata, // all of the event bytes (not just this one). all_bytes: Arc<[u8]>, // event phase. @@ -197,14 +206,14 @@ pub struct Event<'atblock, T: Config> { topics: Vec>, } -impl<'atblock, T: Config> Event<'atblock, T> { +impl<'events, T: Config> Event<'events, T> { /// Attempt to dynamically decode a single event from our events input. fn decode_from( - metadata: &'atblock Metadata, + metadata: &'events Metadata, all_bytes: Arc<[u8]>, start_idx: usize, index: u32, - ) -> Result, EventsError> { + ) -> Result, EventsError> { let input = &mut &all_bytes[start_idx..]; let phase = Phase::decode(input).map_err(EventsError::CannotDecodePhase)?; @@ -304,12 +313,12 @@ impl<'atblock, T: Config> Event<'atblock, T> { } /// The name of the pallet from whence the Event originated. - pub fn pallet_name(&self) -> &str { + pub fn pallet_name(&self) -> &'events str { self.pallet_name } /// The name of the event (ie the name of the variant that it corresponds to). - pub fn event_name(&self) -> &str { + pub fn event_name(&self) -> &'events str { self.event_name } diff --git a/new/src/events/decode_as_event.rs b/subxt/src/events/decode_as_event.rs similarity index 100% rename from new/src/events/decode_as_event.rs rename to subxt/src/events/decode_as_event.rs diff --git a/subxt/src/events/events_client.rs b/subxt/src/events/events_client.rs deleted file mode 100644 index dc0ee1924c..0000000000 --- a/subxt/src/events/events_client.rs +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. 
- -use crate::backend::{Backend, BackendExt, BlockRef}; -use crate::{ - client::OnlineClientT, - config::{Config, HashFor}, - error::EventsError, - events::Events, -}; -use derive_where::derive_where; -use std::future::Future; - -/// A client for working with events. -#[derive_where(Clone; Client)] -pub struct EventsClient { - client: Client, - _marker: std::marker::PhantomData, -} - -impl EventsClient { - /// Create a new [`EventsClient`]. - pub fn new(client: Client) -> Self { - Self { - client, - _marker: std::marker::PhantomData, - } - } -} - -impl EventsClient -where - T: Config, - Client: OnlineClientT, -{ - /// Obtain events at some block hash. - /// - /// # Warning - /// - /// This call only supports blocks produced since the most recent - /// runtime upgrade. You can attempt to retrieve events from older blocks, - /// but may run into errors attempting to work with them. - pub fn at( - &self, - block_ref: impl Into>>, - ) -> impl Future, EventsError>> + Send + 'static { - self.at_or_latest(Some(block_ref.into())) - } - - /// Obtain events for the latest finalized block. - pub fn at_latest( - &self, - ) -> impl Future, EventsError>> + Send + 'static { - self.at_or_latest(None) - } - - /// Obtain events at some block hash. - fn at_or_latest( - &self, - block_ref: Option>>, - ) -> impl Future, EventsError>> + Send + 'static { - // Clone and pass the client in like this so that we can explicitly - // return a Future that's Send + 'static, rather than tied to &self. - let client = self.client.clone(); - async move { - // If a block ref isn't provided, we'll get the latest finalized block to use. 
- let block_ref = match block_ref { - Some(r) => r, - None => client - .backend() - .latest_finalized_block_ref() - .await - .map_err(EventsError::CannotGetLatestFinalizedBlock)?, - }; - - let event_bytes = get_event_bytes(client.backend(), block_ref.hash()).await?; - Ok(Events::decode_from(event_bytes, client.metadata())) - } - } -} - -// The storage key needed to access events. -fn system_events_key() -> [u8; 32] { - let a = sp_crypto_hashing::twox_128(b"System"); - let b = sp_crypto_hashing::twox_128(b"Events"); - let mut res = [0; 32]; - res[0..16].clone_from_slice(&a); - res[16..32].clone_from_slice(&b); - res -} - -// Get the event bytes from the provided client, at the provided block hash. -pub(crate) async fn get_event_bytes( - backend: &dyn Backend, - block_hash: HashFor, -) -> Result, EventsError> { - let bytes = backend - .storage_fetch_value(system_events_key().to_vec(), block_hash) - .await - .map_err(EventsError::CannotFetchEventBytes)? - .unwrap_or_default(); - Ok(bytes) -} diff --git a/subxt/src/events/events_type.rs b/subxt/src/events/events_type.rs deleted file mode 100644 index 04b706656e..0000000000 --- a/subxt/src/events/events_type.rs +++ /dev/null @@ -1,163 +0,0 @@ -use crate::{ - Metadata, - config::{Config, HashFor}, - error::EventsError, -}; -use derive_where::derive_where; -use scale_decode::{DecodeAsFields, DecodeAsType}; -use subxt_core::events::{EventDetails as CoreEventDetails, Events as CoreEvents}; - -pub use subxt_core::events::{EventMetadataDetails, Phase, StaticEvent}; - -/// A collection of events obtained from a block, bundled with the necessary -/// information needed to decode and iterate over them. -// Dev note: we are just wrapping the subxt_core types here to avoid leaking them -// in Subxt and map any errors into Subxt errors so that we don't have this part of the -// API returning a different error type (ie the subxt_core::Error). 
-#[derive_where(Clone, Debug)] -pub struct Events { - inner: CoreEvents, -} - -impl Events { - /// Create a new [`Events`] instance from the given bytes. - pub fn decode_from(event_bytes: Vec, metadata: Metadata) -> Self { - Self { - inner: CoreEvents::decode_from(event_bytes, metadata), - } - } - - /// The number of events. - pub fn len(&self) -> u32 { - self.inner.len() - } - - /// Are there no events in this block? - // Note: mainly here to satisfy clippy.. - pub fn is_empty(&self) -> bool { - self.inner.is_empty() - } - - /// Return the bytes representing all of the events. - pub fn bytes(&self) -> &[u8] { - self.inner.bytes() - } - - /// Iterate over all of the events, using metadata to dynamically - /// decode them as we go, and returning the raw bytes and other associated - /// details. If an error occurs, all subsequent iterations return `None`. - // Dev note: The returned iterator is 'static + Send so that we can box it up and make - // use of it with our `FilterEvents` stuff. - pub fn iter( - &self, - ) -> impl Iterator, EventsError>> + Send + Sync + 'static { - self.inner - .iter() - .map(|item| item.map(|e| EventDetails { inner: e }).map_err(Into::into)) - } - - /// Iterate through the events using metadata to dynamically decode and skip - /// them, and return only those which should decode to the provided `Ev` type. - /// If an error occurs, all subsequent iterations return `None`. - pub fn find(&self) -> impl Iterator> { - self.inner.find::().map(|item| item.map_err(Into::into)) - } - - /// Iterate through the events using metadata to dynamically decode and skip - /// them, and return the first event found which decodes to the provided `Ev` type. - pub fn find_first(&self) -> Result, EventsError> { - self.inner.find_first::().map_err(Into::into) - } - - /// Iterate through the events using metadata to dynamically decode and skip - /// them, and return the last event found which decodes to the provided `Ev` type. 
- pub fn find_last(&self) -> Result, EventsError> { - self.inner.find_last::().map_err(Into::into) - } - - /// Find an event that decodes to the type provided. Returns true if it was found. - pub fn has(&self) -> Result { - self.inner.has::().map_err(Into::into) - } -} - -/// The event details. -#[derive(Debug, Clone)] -pub struct EventDetails { - inner: CoreEventDetails, -} - -impl EventDetails { - /// When was the event produced? - pub fn phase(&self) -> Phase { - self.inner.phase() - } - - /// What index is this event in the stored events for this block. - pub fn index(&self) -> u32 { - self.inner.index() - } - - /// The index of the pallet that the event originated from. - pub fn pallet_index(&self) -> u8 { - self.inner.pallet_index() - } - - /// The index of the event variant that the event originated from. - pub fn variant_index(&self) -> u8 { - self.inner.variant_index() - } - - /// The name of the pallet from whence the Event originated. - pub fn pallet_name(&self) -> &str { - self.inner.pallet_name() - } - - /// The name of the event (ie the name of the variant that it corresponds to). - pub fn variant_name(&self) -> &str { - self.inner.variant_name() - } - - /// Fetch details from the metadata for this event. - pub fn event_metadata(&self) -> EventMetadataDetails<'_> { - self.inner.event_metadata() - } - - /// Return _all_ of the bytes representing this event, which include, in order: - /// - The phase. - /// - Pallet and event index. - /// - Event fields. - /// - Event Topics. - pub fn bytes(&self) -> &[u8] { - self.inner.bytes() - } - - /// Return the bytes representing the fields stored in this event. - pub fn field_bytes(&self) -> &[u8] { - self.inner.field_bytes() - } - - /// Decode and provide the event fields back in the form of a [`scale_value::Composite`] - /// type which represents the named or unnamed fields that were present in the event. 
- pub fn decode_as_fields(&self) -> Result { - self.inner.decode_as_fields().map_err(Into::into) - } - - /// Attempt to decode these [`EventDetails`] into a type representing the event fields. - /// Such types are exposed in the codegen as `pallet_name::events::EventName` types. - pub fn as_event(&self) -> Result, EventsError> { - self.inner.as_event::().map_err(Into::into) - } - - /// Attempt to decode these [`EventDetails`] into a root event type (which includes - /// the pallet and event enum variants as well as the event fields). A compatible - /// type for this is exposed via static codegen as a root level `Event` type. - pub fn as_root_event(&self) -> Result { - self.inner.as_root_event::().map_err(Into::into) - } - - /// Return the topics associated with this event. - pub fn topics(&self) -> &[HashFor] { - self.inner.topics() - } -} diff --git a/subxt/src/events/mod.rs b/subxt/src/events/mod.rs deleted file mode 100644 index 185cafa250..0000000000 --- a/subxt/src/events/mod.rs +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! This module exposes the types and such necessary for working with events. -//! The two main entry points into events are [`crate::OnlineClient::events()`] -//! and calls like [crate::tx::TxProgress::wait_for_finalized_success()]. - -mod events_client; -mod events_type; - -use crate::client::OnlineClientT; -use crate::error::EventsError; -use subxt_core::{ - Metadata, - config::{Config, HashFor}, -}; - -pub use events_client::EventsClient; -pub use events_type::{EventDetails, EventMetadataDetails, Events, Phase, StaticEvent}; - -/// Creates a new [`Events`] instance by fetching the corresponding bytes at `block_hash` from the client. 
-pub async fn new_events_from_client( - metadata: Metadata, - block_hash: HashFor, - client: C, -) -> Result, EventsError> -where - T: Config, - C: OnlineClientT, -{ - let event_bytes = events_client::get_event_bytes(client.backend(), block_hash).await?; - Ok(Events::::decode_from(event_bytes, metadata)) -} diff --git a/new/src/extrinsics.rs b/subxt/src/extrinsics.rs similarity index 94% rename from new/src/extrinsics.rs rename to subxt/src/extrinsics.rs index 35dbeda472..144573cfa4 100644 --- a/new/src/extrinsics.rs +++ b/subxt/src/extrinsics.rs @@ -141,6 +141,12 @@ impl<'atblock, T: Config, C: OfflineClientAtBlockT> Extrinsics<'atblock, T, C .filter_map(|e| e.decode_call_data_fields_as::()) } + /// Find the first extrinsic matching the given type, returning `None` if it doesn't exist, + /// and the result of decoding it if it does. + pub fn find_first(&self) -> Option> { + self.find::().next() + } + /// Find an extrinsic matching the given type, returning true if it exists. This function does _not_ /// try to actually decode the extrinsic bytes into the given type. pub fn has(&self) -> bool { @@ -343,14 +349,14 @@ where C: OnlineClientAtBlockT, { /// The events associated with the extrinsic. - pub async fn events(&self) -> Result, EventsError> { + pub async fn events(&self) -> Result, EventsError> { ExtrinsicEvents::fetch(self.client, self.hash(), self.index()).await } } /// The events associated with a given extrinsic. 
#[derive(Debug)] -pub struct ExtrinsicEvents<'atblock, T: Config> { +pub struct ExtrinsicEvents { // The hash of the extrinsic (handy to expose here because // this type is returned from TxProgress things in the most // basic flows, so it's the only place people can access it @@ -359,12 +365,12 @@ pub struct ExtrinsicEvents<'atblock, T: Config> { // The index of the extrinsic: extrinsic_index: usize, // All of the events in the block: - events: crate::events::Events<'atblock, T>, + events: crate::events::Events, } -impl<'atblock, T: Config> ExtrinsicEvents<'atblock, T> { +impl ExtrinsicEvents { pub(crate) async fn fetch( - client: &'atblock impl OnlineClientAtBlockT, + client: &impl OnlineClientAtBlockT, extrinsic_hash: HashFor, extrinsic_index: usize, ) -> Result { @@ -387,7 +393,7 @@ impl<'atblock, T: Config> ExtrinsicEvents<'atblock, T> { } /// Return all of the events in the block that the extrinsic is in. - pub fn all_events_in_block(&self) -> &events::Events<'atblock, T> { + pub fn all_events_in_block(&self) -> &events::Events { &self.events } @@ -413,6 +419,12 @@ impl<'atblock, T: Config> ExtrinsicEvents<'atblock, T> { .filter_map(|e| e.decode_fields_as::()) } + /// Find the first event matching the given type, returning `None` if it doesn't exist, + /// and the result of decoding it if it does. + pub fn find_first(&self) -> Option> { + self.find::().next() + } + /// Find an event matching the given type, returning true if it exists. This function does _not_ /// try to actually decode the event bytes into the given type. 
pub fn has(&self) -> bool { diff --git a/new/src/extrinsics/decode_as_extrinsic.rs b/subxt/src/extrinsics/decode_as_extrinsic.rs similarity index 100% rename from new/src/extrinsics/decode_as_extrinsic.rs rename to subxt/src/extrinsics/decode_as_extrinsic.rs diff --git a/new/src/extrinsics/extrinsic_transaction_extensions.rs b/subxt/src/extrinsics/extrinsic_transaction_extensions.rs similarity index 100% rename from new/src/extrinsics/extrinsic_transaction_extensions.rs rename to subxt/src/extrinsics/extrinsic_transaction_extensions.rs diff --git a/subxt/src/lib.rs b/subxt/src/lib.rs index 6434e16b4c..d80b530a4b 100644 --- a/subxt/src/lib.rs +++ b/subxt/src/lib.rs @@ -2,94 +2,71 @@ // This file is dual-licensed as Apache-2.0 or GPL-3.0. // see LICENSE for license details. +// TODO: REMOVE BEFORE MERGING. +#![allow(missing_docs)] + //! Subxt is a library for interacting with Substrate based nodes. Using it looks something like this: //! //! ```rust,ignore -#![doc = include_str!("../examples/tx_basic.rs")] +#![doc = include_str!("../examples/transactions_basic.rs")] //! ``` //! //! Take a look at [the Subxt guide](book) to learn more about how to use Subxt. -#![cfg_attr(docsrs, feature(doc_cfg))] - #[cfg(any( all(feature = "web", feature = "native"), not(any(feature = "web", feature = "native")) ))] compile_error!("subxt: exactly one of the 'web' and 'native' features should be used."); -// Internal helper macros -#[macro_use] -mod macros; +// TODO: Do we need this still? +// // Suppress an unused dependency warning because these are +// // only used in example code snippets at the time of writing. +// #[cfg(test)] +// mod only_used_in_docs_or_tests { +// use subxt_signer as _; +// use tokio as _; +// use tracing_subscriber as _; +// } -// The guide is here. -pub mod book; - -// Suppress an unused dependency warning because tokio is -// only used in example code snippets at the time of writing. 
-#[cfg(test)] -mod only_used_in_docs_or_tests { - use subxt_signer as _; - use tokio as _; -} - -// Suppress an unused dependency warning because tracing_subscriber is -// only used in example code snippets at the time of writing. -#[cfg(test)] -use tracing_subscriber as _; +// This is exposed so that the code generated by subxt-codegen can avoid +// relying on std things. Given that it relies on subxt, it _must_ use std, +// but this may change if we move things back to a no-std core/common crate. +// that it can point at. +// +// Undocumented and **should not be depended on by anybody else**. +#[doc(hidden)] +pub extern crate alloc; pub mod backend; -pub mod blocks; pub mod client; +pub mod config; pub mod constants; pub mod custom_values; +pub mod dynamic; pub mod error; pub mod events; -pub mod runtime_api; +pub mod extrinsics; +pub mod metadata; +pub mod runtime_apis; pub mod storage; -pub mod tx; +pub mod transactions; pub mod utils; pub mod view_functions; -/// This module provides a [`Config`] type, which is used to define various -/// types that are important in order to speak to a particular chain. -/// [`SubstrateConfig`] provides a default set of these types suitable for the -/// default Substrate node implementation, and [`PolkadotConfig`] for a -/// Polkadot node. -pub mod config { - pub use subxt_core::config::{ - Config, DefaultExtrinsicParams, DefaultExtrinsicParamsBuilder, ExtrinsicParams, - ExtrinsicParamsEncoder, Hash, HashFor, Hasher, Header, PolkadotConfig, - PolkadotExtrinsicParams, SubstrateConfig, SubstrateExtrinsicParams, TransactionExtension, - polkadot, substrate, transaction_extensions, - }; - pub use subxt_core::error::ExtrinsicParamsError; -} - -/// Types representing the metadata obtained from a node. -pub mod metadata { - pub use subxt_metadata::*; -} - -/// Submit dynamic transactions. -pub mod dynamic { - pub use subxt_core::dynamic::*; -} - -// Expose light client bits -cfg_unstable_light_client! 
{ - pub use subxt_lightclient as lightclient; -} - // Expose a few of the most common types at root, // but leave most types behind their respective modules. pub use crate::{ client::{OfflineClient, OnlineClient}, config::{Config, PolkadotConfig, SubstrateConfig}, error::Error, - metadata::Metadata, + metadata::{ArcMetadata, Metadata}, }; +// Expose light client bits +#[cfg(feature = "unstable-light-client")] +pub use subxt_lightclient as lightclient; + /// Re-export external crates that are made use of in the subxt API. pub mod ext { pub use codec; @@ -99,12 +76,10 @@ pub mod ext { pub use scale_decode; pub use scale_encode; pub use scale_value; - pub use subxt_core; pub use subxt_rpcs; - cfg_jsonrpsee! { - pub use jsonrpsee; - } + #[cfg(feature = "jsonrpsee")] + pub use jsonrpsee; } /// Generate a strongly typed API for interacting with a Substrate runtime from its metadata of WASM. @@ -146,15 +121,6 @@ pub mod ext { /// mod polkadot {} /// ``` /// -/// You can use the `$OUT_DIR` placeholder in the path to reference metadata generated at build time: -/// -/// ```rust,ignore -/// #[subxt::subxt( -/// runtime_metadata_path = "$OUT_DIR/metadata.scale", -/// )] -/// mod polkadot {} -/// ``` -/// /// ## Using a WASM runtime via `runtime_path = "..."` /// /// This requires the `runtime-wasm-path` feature flag. 
@@ -168,15 +134,6 @@ pub mod ext { /// mod polkadot {} /// ``` /// -/// You can also use the `$OUT_DIR` placeholder in the path to reference WASM files generated at build time: -/// -/// ```rust,ignore -/// #[subxt::subxt( -/// runtime_path = "$OUT_DIR/runtime.wasm", -/// )] -/// mod polkadot {} -/// ``` -/// /// ## Connecting to a node to download metadata via `runtime_metadata_insecure_url = "..."` /// /// This will, at compile time, connect to the JSON-RPC interface for some node at the URL given, diff --git a/subxt/src/macros.rs b/subxt/src/macros.rs deleted file mode 100644 index 2620dd1608..0000000000 --- a/subxt/src/macros.rs +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -macro_rules! cfg_feature { - ($feature:literal, $($item:item)*) => { - $( - #[cfg(feature = $feature)] - #[cfg_attr(docsrs, doc(cfg(feature = $feature)))] - $item - )* - } -} - -macro_rules! cfg_unstable_light_client { - ($($item:item)*) => { - crate::macros::cfg_feature!("unstable-light-client", $($item)*); - }; -} - -macro_rules! cfg_reconnecting_rpc_client { - ($($item:item)*) => { - crate::macros::cfg_feature!("reconnecting-rpc-client", $($item)*); - }; -} - -macro_rules! cfg_jsonrpsee { - ($($item:item)*) => { - crate::macros::cfg_feature!("jsonrpsee", $($item)*); - }; -} - -#[allow(unused)] -macro_rules! cfg_jsonrpsee_native { - ($($item:item)*) => { - $( - #[cfg(all(feature = "jsonrpsee", feature = "native"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "jsonrpsee", feature = "native"))))] - $item - )* - } -} - -#[allow(unused)] -macro_rules! cfg_jsonrpsee_web { - ($($item:item)*) => { - $( - #[cfg(all(feature = "jsonrpsee", feature = "web"))] - #[cfg_attr(docsrs, doc(cfg(all(feature = "jsonrpsee", feature = "web"))))] - $item - )* - } -} - -pub(crate) use {cfg_feature, cfg_jsonrpsee, cfg_unstable_light_client}; - -// Only used by light-client. 
-#[allow(unused)] -pub(crate) use {cfg_jsonrpsee_native, cfg_jsonrpsee_web, cfg_reconnecting_rpc_client}; diff --git a/subxt/src/metadata.rs b/subxt/src/metadata.rs new file mode 100644 index 0000000000..58c8474655 --- /dev/null +++ b/subxt/src/metadata.rs @@ -0,0 +1,7 @@ +use std::sync::Arc; + +// Re-export everything from subxt-metadata here. +pub use subxt_metadata::*; + +/// A cheaply clonable version of our [`Metadata`]. +pub type ArcMetadata = Arc; diff --git a/subxt/src/runtime_api/mod.rs b/subxt/src/runtime_api/mod.rs deleted file mode 100644 index f6dafb31cb..0000000000 --- a/subxt/src/runtime_api/mod.rs +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! Types associated with executing runtime API calls. - -mod runtime_client; -mod runtime_types; - -pub use runtime_client::RuntimeApiClient; -pub use runtime_types::RuntimeApi; -pub use subxt_core::runtime_api::payload::{DynamicPayload, Payload, StaticPayload, dynamic}; diff --git a/subxt/src/runtime_api/runtime_client.rs b/subxt/src/runtime_api/runtime_client.rs deleted file mode 100644 index 6412468be7..0000000000 --- a/subxt/src/runtime_api/runtime_client.rs +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use super::runtime_types::RuntimeApi; - -use crate::{ - backend::BlockRef, - client::OnlineClientT, - config::{Config, HashFor}, - error::RuntimeApiError, -}; -use derive_where::derive_where; -use std::{future::Future, marker::PhantomData}; - -/// Execute runtime API calls. 
-#[derive_where(Clone; Client)] -pub struct RuntimeApiClient { - client: Client, - _marker: PhantomData, -} - -impl RuntimeApiClient { - /// Create a new [`RuntimeApiClient`] - pub fn new(client: Client) -> Self { - Self { - client, - _marker: PhantomData, - } - } -} - -impl RuntimeApiClient -where - T: Config, - Client: OnlineClientT, -{ - /// Obtain a runtime API interface at some block hash. - pub fn at(&self, block_ref: impl Into>>) -> RuntimeApi { - RuntimeApi::new(self.client.clone(), block_ref.into()) - } - - /// Obtain a runtime API interface at the latest finalized block. - pub fn at_latest( - &self, - ) -> impl Future, RuntimeApiError>> + Send + 'static { - // Clone and pass the client in like this so that we can explicitly - // return a Future that's Send + 'static, rather than tied to &self. - let client = self.client.clone(); - async move { - // get the ref for the latest finalized block and use that. - let block_ref = client - .backend() - .latest_finalized_block_ref() - .await - .map_err(RuntimeApiError::CannotGetLatestFinalizedBlock)?; - - Ok(RuntimeApi::new(client, block_ref)) - } - } -} diff --git a/subxt/src/runtime_api/runtime_types.rs b/subxt/src/runtime_api/runtime_types.rs deleted file mode 100644 index 6b0ee00935..0000000000 --- a/subxt/src/runtime_api/runtime_types.rs +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use super::Payload; -use crate::{ - backend::BlockRef, - client::OnlineClientT, - config::{Config, HashFor}, - error::RuntimeApiError, -}; -use derive_where::derive_where; -use std::{future::Future, marker::PhantomData}; - -/// Execute runtime API calls. 
-#[derive_where(Clone; Client)] -pub struct RuntimeApi { - client: Client, - block_ref: BlockRef>, - _marker: PhantomData, -} - -impl RuntimeApi { - /// Create a new [`RuntimeApi`] - pub(crate) fn new(client: Client, block_ref: BlockRef>) -> Self { - Self { - client, - block_ref, - _marker: PhantomData, - } - } -} - -impl RuntimeApi -where - T: Config, - Client: OnlineClientT, -{ - /// Run the validation logic against some runtime API payload you'd like to use. Returns `Ok(())` - /// if the payload is valid (or if it's not possible to check since the payload has no validation hash). - /// Return an error if the payload was not valid or something went wrong trying to validate it (ie - /// the runtime API in question do not exist at all) - pub fn validate(&self, payload: Call) -> Result<(), RuntimeApiError> { - subxt_core::runtime_api::validate(payload, &self.client.metadata()).map_err(Into::into) - } - - /// Execute a raw runtime API call. This returns the raw bytes representing the result - /// of this call. The caller is responsible for decoding the result. - pub fn call_raw<'a>( - &self, - function: &'a str, - call_parameters: Option<&'a [u8]>, - ) -> impl Future, RuntimeApiError>> + use<'a, Client, T> { - let client = self.client.clone(); - let block_hash = self.block_ref.hash(); - // Ensure that the returned future doesn't have a lifetime tied to api.runtime_api(), - // which is a temporary thing we'll be throwing away quickly: - async move { - let data = client - .backend() - .call(function, call_parameters, block_hash) - .await - .map_err(RuntimeApiError::CannotCallApi)?; - Ok(data) - } - } - - /// Execute a runtime API call. 
- pub fn call( - &self, - payload: Call, - ) -> impl Future> + use - { - let client = self.client.clone(); - let block_hash = self.block_ref.hash(); - // Ensure that the returned future doesn't have a lifetime tied to api.runtime_api(), - // which is a temporary thing we'll be throwing away quickly: - async move { - let metadata = client.metadata(); - - // Validate the runtime API payload hash against the compile hash from codegen. - subxt_core::runtime_api::validate(&payload, &metadata)?; - - // Encode the arguments of the runtime call. - let call_name = subxt_core::runtime_api::call_name(&payload); - let call_args = subxt_core::runtime_api::call_args(&payload, &metadata)?; - - // Make the call. - let bytes = client - .backend() - .call(&call_name, Some(call_args.as_slice()), block_hash) - .await - .map_err(RuntimeApiError::CannotCallApi)?; - - // Decode the response. - let value = subxt_core::runtime_api::decode_value(&mut &*bytes, &payload, &metadata)?; - Ok(value) - } - } -} diff --git a/new/src/runtime_apis.rs b/subxt/src/runtime_apis.rs similarity index 100% rename from new/src/runtime_apis.rs rename to subxt/src/runtime_apis.rs diff --git a/new/src/runtime_apis/payload.rs b/subxt/src/runtime_apis/payload.rs similarity index 100% rename from new/src/runtime_apis/payload.rs rename to subxt/src/runtime_apis/payload.rs diff --git a/new/src/storage.rs b/subxt/src/storage.rs similarity index 100% rename from new/src/storage.rs rename to subxt/src/storage.rs diff --git a/new/src/storage/address.rs b/subxt/src/storage/address.rs similarity index 100% rename from new/src/storage/address.rs rename to subxt/src/storage/address.rs diff --git a/subxt/src/storage/mod.rs b/subxt/src/storage/mod.rs deleted file mode 100644 index c62718d709..0000000000 --- a/subxt/src/storage/mod.rs +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! 
Types associated with accessing and working with storage items. - -mod storage_client; -mod storage_client_at; - -pub use storage_client::StorageClient; -pub use storage_client_at::{StorageClientAt, StorageEntryClient, StorageKeyValue, StorageValue}; -pub use subxt_core::storage::address::{Address, DynamicAddress, StaticAddress, dynamic}; diff --git a/new/src/storage/prefix_of.rs b/subxt/src/storage/prefix_of.rs similarity index 100% rename from new/src/storage/prefix_of.rs rename to subxt/src/storage/prefix_of.rs diff --git a/subxt/src/storage/storage_client.rs b/subxt/src/storage/storage_client.rs deleted file mode 100644 index ca6fcffa57..0000000000 --- a/subxt/src/storage/storage_client.rs +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use super::storage_client_at::StorageClientAt; -use crate::{ - backend::BlockRef, - client::{OfflineClientT, OnlineClientT}, - config::{Config, HashFor}, - error::StorageError, -}; -use derive_where::derive_where; -use std::{future::Future, marker::PhantomData}; -use subxt_core::storage::address::Address; - -/// Query the runtime storage. -#[derive_where(Clone; Client)] -pub struct StorageClient { - client: Client, - _marker: PhantomData, -} - -impl StorageClient { - /// Create a new [`StorageClient`] - pub fn new(client: Client) -> Self { - Self { - client, - _marker: PhantomData, - } - } -} - -impl StorageClient -where - T: Config, - Client: OfflineClientT, -{ - /// Run the validation logic against some storage address you'd like to access. Returns `Ok(())` - /// if the address is valid (or if it's not possible to check since the address has no validation hash). - /// Return an error if the address was not valid or something went wrong trying to validate it (ie - /// the pallet or storage entry in question do not exist at all). 
- pub fn validate(&self, address: &Addr) -> Result<(), StorageError> { - subxt_core::storage::validate(address, &self.client.metadata()).map_err(Into::into) - } -} - -impl StorageClient -where - T: Config, - Client: OnlineClientT, -{ - /// Obtain storage at some block hash. - pub fn at(&self, block_ref: impl Into>>) -> StorageClientAt { - StorageClientAt::new(self.client.clone(), block_ref.into()) - } - - /// Obtain storage at the latest finalized block. - pub fn at_latest( - &self, - ) -> impl Future, StorageError>> + Send + 'static - { - // Clone and pass the client in like this so that we can explicitly - // return a Future that's Send + 'static, rather than tied to &self. - let client = self.client.clone(); - async move { - // get the ref for the latest finalized block and use that. - let block_ref = client - .backend() - .latest_finalized_block_ref() - .await - .map_err(StorageError::CannotGetLatestFinalizedBlock)?; - - Ok(StorageClientAt::new(client, block_ref)) - } - } -} diff --git a/subxt/src/storage/storage_client_at.rs b/subxt/src/storage/storage_client_at.rs deleted file mode 100644 index 605c8e38d5..0000000000 --- a/subxt/src/storage/storage_client_at.rs +++ /dev/null @@ -1,383 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use crate::{ - backend::{BackendExt, BlockRef}, - client::{OfflineClientT, OnlineClientT}, - config::{Config, HashFor}, - error::StorageError, -}; -use derive_where::derive_where; -use futures::StreamExt; -use std::marker::PhantomData; -use subxt_core::Metadata; -use subxt_core::storage::{PrefixOf, address::Address}; -use subxt_core::utils::{Maybe, Yes}; - -pub use subxt_core::storage::{StorageKeyValue, StorageValue}; - -/// Query the runtime storage. 
-#[derive_where(Clone; Client)] -pub struct StorageClientAt { - client: Client, - metadata: Metadata, - block_ref: BlockRef>, - _marker: PhantomData, -} - -impl StorageClientAt -where - T: Config, - Client: OfflineClientT, -{ - /// Create a new [`StorageClientAt`]. - pub(crate) fn new(client: Client, block_ref: BlockRef>) -> Self { - // Retrieve and store metadata here so that we can borrow it in - // subsequent structs, and thus also borrow storage info and - // things that borrow from metadata. - let metadata = client.metadata(); - - Self { - client, - metadata, - block_ref, - _marker: PhantomData, - } - } -} - -impl StorageClientAt -where - T: Config, - Client: OfflineClientT, -{ - /// This returns a [`StorageEntryClient`], which allows working with the storage entry at the provided address. - pub fn entry( - &self, - address: Addr, - ) -> Result, StorageError> { - let inner = subxt_core::storage::entry(address, &self.metadata)?; - Ok(StorageEntryClient { - inner, - client: self.client.clone(), - block_ref: self.block_ref.clone(), - _marker: core::marker::PhantomData, - }) - } -} - -impl StorageClientAt -where - T: Config, - Client: OnlineClientT, -{ - /// This is essentially a shorthand for `client.entry(addr)?.fetch(key_parts)`. See [`StorageEntryClient::fetch()`]. - pub async fn fetch( - &self, - addr: Addr, - key_parts: Addr::KeyParts, - ) -> Result, StorageError> { - let entry = subxt_core::storage::entry(addr, &self.metadata)?; - fetch(&entry, &self.client, self.block_ref.hash(), key_parts).await - } - - /// This is essentially a shorthand for `client.entry(addr)?.try_fetch(key_parts)`. See [`StorageEntryClient::try_fetch()`]. - pub async fn try_fetch( - &self, - addr: Addr, - key_parts: Addr::KeyParts, - ) -> Result>, StorageError> { - let entry = subxt_core::storage::entry(addr, &self.metadata)?; - try_fetch(&entry, &self.client, self.block_ref.hash(), key_parts).await - } - - /// This is essentially a shorthand for `client.entry(addr)?.iter(key_parts)`. 
See [`StorageEntryClient::iter()`]. - pub async fn iter>( - &'_ self, - addr: Addr, - key_parts: KeyParts, - ) -> Result< - impl futures::Stream, StorageError>> - + use<'_, Addr, Client, T, KeyParts>, - StorageError, - > { - let entry = subxt_core::storage::entry(addr, &self.metadata)?; - iter(entry, &self.client, self.block_ref.hash(), key_parts).await - } - - /// In rare cases, you may wish to fetch a storage value that does not live at a typical address. This method - /// is a fallback for those cases, and allows you to provide the raw storage key bytes corresponding to the - /// entry you wish to obtain. The response will either be the bytes for the value found at that location, or - /// otherwise an error. [`StorageError::NoValueFound`] will be returned in the event that the request was valid - /// but no value lives at the given location). - pub async fn fetch_raw(&self, key_bytes: Vec) -> Result, StorageError> { - let block_hash = self.block_ref.hash(); - let value = self - .client - .backend() - .storage_fetch_value(key_bytes, block_hash) - .await - .map_err(StorageError::CannotFetchValue)? - .ok_or(StorageError::NoValueFound)?; - - Ok(value) - } - - /// The storage version of a pallet. - /// The storage version refers to the `frame_support::traits::Metadata::StorageVersion` type. - pub async fn storage_version(&self, pallet_name: impl AsRef) -> Result { - // construct the storage key. This is done similarly in - // `frame_support::traits::metadata::StorageVersion::storage_key()`: - let mut key_bytes: Vec = vec![]; - key_bytes.extend(&sp_crypto_hashing::twox_128( - pallet_name.as_ref().as_bytes(), - )); - key_bytes.extend(&sp_crypto_hashing::twox_128(b":__STORAGE_VERSION__:")); - - // fetch the raw bytes and decode them into the StorageVersion struct: - let storage_version_bytes = self.fetch_raw(key_bytes).await?; - - ::decode(&mut &storage_version_bytes[..]) - .map_err(StorageError::CannotDecodeStorageVersion) - } - - /// Fetch the runtime WASM code. 
- pub async fn runtime_wasm_code(&self) -> Result, StorageError> { - // note: this should match the `CODE` constant in `sp_core::storage::well_known_keys` - self.fetch_raw(b":code".to_vec()).await - } -} - -/// This represents a single storage entry (be it a plain value or map) -/// and the operations that can be performed on it. -pub struct StorageEntryClient<'atblock, T: Config, Client, Addr, IsPlain> { - inner: subxt_core::storage::StorageEntry<'atblock, Addr>, - client: Client, - block_ref: BlockRef>, - _marker: PhantomData<(T, IsPlain)>, -} - -impl<'atblock, T, Client, Addr, IsPlain> StorageEntryClient<'atblock, T, Client, Addr, IsPlain> -where - T: Config, - Addr: Address, -{ - /// Name of the pallet containing this storage entry. - pub fn pallet_name(&self) -> &str { - self.inner.pallet_name() - } - - /// Name of the storage entry. - pub fn entry_name(&self) -> &str { - self.inner.entry_name() - } - - /// Is the storage entry a plain value? - pub fn is_plain(&self) -> bool { - self.inner.is_plain() - } - - /// Is the storage entry a map? - pub fn is_map(&self) -> bool { - self.inner.is_map() - } - - /// Return the default value for this storage entry, if there is one. Returns `None` if there - /// is no default value. - pub fn default_value(&self) -> Option> { - self.inner.default_value() - } -} - -// Plain values get a fetch method with no extra arguments. -impl<'atblock, T, Client, Addr> StorageEntryClient<'atblock, T, Client, Addr, Yes> -where - T: Config, - Addr: Address, - Client: OnlineClientT, -{ - /// Fetch the storage value at this location. If no value is found, the default value will be returned - /// for this entry if one exists. If no value is found and no default value exists, an error will be returned. 
- pub async fn fetch(&self) -> Result, StorageError> { - let value = self.try_fetch().await?.map_or_else( - || self.inner.default_value().ok_or(StorageError::NoValueFound), - Ok, - )?; - - Ok(value) - } - - /// Fetch the storage value at this location. If no value is found, `None` will be returned. - pub async fn try_fetch( - &self, - ) -> Result>, StorageError> { - let value = self - .client - .backend() - .storage_fetch_value(self.key_prefix().to_vec(), self.block_ref.hash()) - .await - .map_err(StorageError::CannotFetchValue)? - .map(|bytes| self.inner.value(bytes)); - - Ok(value) - } - - /// This is identical to [`StorageEntryClient::key_prefix()`] and is the full - /// key for this storage entry. - pub fn key(&self) -> [u8; 32] { - self.inner.key_prefix() - } - - /// The keys for plain storage values are always 32 byte hashes. - pub fn key_prefix(&self) -> [u8; 32] { - self.inner.key_prefix() - } -} - -// When HasDefaultValue = Yes, we expect there to exist a valid default value and will use that -// if we fetch an entry and get nothing back. -impl<'atblock, T, Client, Addr> StorageEntryClient<'atblock, T, Client, Addr, Maybe> -where - T: Config, - Addr: Address, - Client: OnlineClientT, -{ - /// Fetch a storage value within this storage entry. - /// - /// This entry may be a map, and so you must provide the relevant values for each part of the storage - /// key that is required in order to point to a single value. - /// - /// If no value is found, the default value will be returned for this entry if one exists. If no value is - /// found and no default value exists, an error will be returned. - pub async fn fetch( - &self, - key_parts: Addr::KeyParts, - ) -> Result, StorageError> { - fetch(&self.inner, &self.client, self.block_ref.hash(), key_parts).await - } - - /// Fetch a storage value within this storage entry. 
- /// - /// This entry may be a map, and so you must provide the relevant values for each part of the storage - /// key that is required in order to point to a single value. - /// - /// If no value is found, `None` will be returned. - pub async fn try_fetch( - &self, - key_parts: Addr::KeyParts, - ) -> Result>, StorageError> { - try_fetch(&self.inner, &self.client, self.block_ref.hash(), key_parts).await - } - - /// Iterate over storage values within this storage entry. - /// - /// You may provide any prefix of the values needed to point to a single value. Normally you will - /// provide `()` to iterate over _everything_, or `(first_key,)` to iterate over everything underneath - /// `first_key` in the map, or `(first_key, second_key)` to iterate over everything underneath `first_key` - /// and `second_key` in the map, and so on, up to the actual depth of the map - 1. - pub async fn iter>( - &self, - key_parts: KeyParts, - ) -> Result< - impl futures::Stream, StorageError>> - + use<'atblock, Addr, Client, T, KeyParts>, - StorageError, - > { - iter( - self.inner.clone(), - &self.client, - self.block_ref.hash(), - key_parts, - ) - .await - } - - /// This returns a full key to a single value in this storage entry. - pub fn key(&self, key_parts: Addr::KeyParts) -> Result, StorageError> { - let key = self.inner.fetch_key(key_parts)?; - Ok(key) - } - - /// This returns valid keys to iterate over the storage entry at the available levels. - pub fn iter_key>( - &self, - key_parts: KeyParts, - ) -> Result, StorageError> { - let key = self.inner.iter_key(key_parts)?; - Ok(key) - } - - /// The first 32 bytes of the storage entry key, which points to the entry but not necessarily - /// a single storage value (unless the entry is a plain value). 
- pub fn key_prefix(&self) -> [u8; 32] { - self.inner.key_prefix() - } -} - -async fn fetch<'atblock, T: Config, Client: OnlineClientT, Addr: Address>( - entry: &subxt_core::storage::StorageEntry<'atblock, Addr>, - client: &Client, - block_hash: HashFor, - key_parts: Addr::KeyParts, -) -> Result, StorageError> { - let value = try_fetch(entry, client, block_hash, key_parts) - .await? - .or_else(|| entry.default_value()) - .unwrap(); - - Ok(value) -} - -async fn try_fetch<'atblock, T: Config, Client: OnlineClientT, Addr: Address>( - entry: &subxt_core::storage::StorageEntry<'atblock, Addr>, - client: &Client, - block_hash: HashFor, - key_parts: Addr::KeyParts, -) -> Result>, StorageError> { - let key = entry.fetch_key(key_parts)?; - - let value = client - .backend() - .storage_fetch_value(key, block_hash) - .await - .map_err(StorageError::CannotFetchValue)? - .map(|bytes| entry.value(bytes)) - .or_else(|| entry.default_value()); - - Ok(value) -} - -async fn iter< - 'atblock, - T: Config, - Client: OnlineClientT, - Addr: Address, - KeyParts: PrefixOf, ->( - entry: subxt_core::storage::StorageEntry<'atblock, Addr>, - client: &Client, - block_hash: HashFor, - key_parts: KeyParts, -) -> Result< - impl futures::Stream, StorageError>> - + use<'atblock, Addr, Client, T, KeyParts>, - StorageError, -> { - let key_bytes = entry.iter_key(key_parts)?; - - let stream = client - .backend() - .storage_fetch_descendant_values(key_bytes, block_hash) - .await - .map_err(StorageError::CannotIterateValues)? 
- .map(move |kv| { - let kv = match kv { - Ok(kv) => kv, - Err(e) => return Err(StorageError::StreamFailure(e)), - }; - Ok(entry.key_value(kv.key, kv.value)) - }); - - Ok(Box::pin(stream)) -} diff --git a/new/src/storage/storage_entry.rs b/subxt/src/storage/storage_entry.rs similarity index 100% rename from new/src/storage/storage_entry.rs rename to subxt/src/storage/storage_entry.rs diff --git a/new/src/storage/storage_key.rs b/subxt/src/storage/storage_key.rs similarity index 100% rename from new/src/storage/storage_key.rs rename to subxt/src/storage/storage_key.rs diff --git a/new/src/storage/storage_key_value.rs b/subxt/src/storage/storage_key_value.rs similarity index 100% rename from new/src/storage/storage_key_value.rs rename to subxt/src/storage/storage_key_value.rs diff --git a/new/src/storage/storage_value.rs b/subxt/src/storage/storage_value.rs similarity index 100% rename from new/src/storage/storage_value.rs rename to subxt/src/storage/storage_value.rs diff --git a/new/src/transactions.rs b/subxt/src/transactions.rs similarity index 100% rename from new/src/transactions.rs rename to subxt/src/transactions.rs diff --git a/new/src/transactions/account_nonce.rs b/subxt/src/transactions/account_nonce.rs similarity index 100% rename from new/src/transactions/account_nonce.rs rename to subxt/src/transactions/account_nonce.rs diff --git a/new/src/transactions/default_params.rs b/subxt/src/transactions/default_params.rs similarity index 100% rename from new/src/transactions/default_params.rs rename to subxt/src/transactions/default_params.rs diff --git a/new/src/transactions/payload.rs b/subxt/src/transactions/payload.rs similarity index 100% rename from new/src/transactions/payload.rs rename to subxt/src/transactions/payload.rs diff --git a/new/src/transactions/signer.rs b/subxt/src/transactions/signer.rs similarity index 100% rename from new/src/transactions/signer.rs rename to subxt/src/transactions/signer.rs diff --git 
a/new/src/transactions/transaction_progress.rs b/subxt/src/transactions/transaction_progress.rs similarity index 91% rename from new/src/transactions/transaction_progress.rs rename to subxt/src/transactions/transaction_progress.rs index 385bf96261..d84710a5a0 100644 --- a/new/src/transactions/transaction_progress.rs +++ b/subxt/src/transactions/transaction_progress.rs @@ -1,6 +1,6 @@ use crate::backend::BlockRef; use crate::backend::{StreamOfResults, TransactionStatus as BackendTransactionStatus}; -use crate::client::OnlineClientAtBlockT; +use crate::client::{BlockNumberOrRef, OnlineClientAtBlockT}; use crate::config::{Config, HashFor}; use crate::error::{ DispatchError, TransactionEventsError, TransactionFinalizedSuccessError, @@ -104,7 +104,7 @@ where /// out if they finally made it into a block or not. pub async fn wait_for_finalized_success( self, - ) -> Result, TransactionFinalizedSuccessError> { + ) -> Result, TransactionFinalizedSuccessError> { let evs = self.wait_for_finalized().await?.wait_for_success().await?; Ok(evs) } @@ -258,9 +258,7 @@ impl<'atblock, T: Config, C: OnlineClientAtBlockT> TransactionInBlock<'atbloc /// /// **Note:** This has to download block details from the node and decode events /// from them. - pub async fn wait_for_success( - &self, - ) -> Result, TransactionEventsError> { + pub async fn wait_for_success(&self) -> Result, TransactionEventsError> { let events = self.fetch_events().await?; // Try to find any errors; return the first one we encounter. @@ -292,12 +290,18 @@ impl<'atblock, T: Config, C: OnlineClientAtBlockT> TransactionInBlock<'atbloc /// /// **Note:** This has to download block details from the node and decode events /// from them. 
- pub async fn fetch_events( - &self, - ) -> Result, TransactionEventsError> { - let hasher = self.client.hasher(); + pub async fn fetch_events(&self) -> Result, TransactionEventsError> { + // Create a client at the block the TX made it into: + let tx_block_ref = BlockNumberOrRef::BlockRef(self.block_ref.clone()); + let at_tx_block = self + .client + .at_block(tx_block_ref) + .await + .map_err(TransactionEventsError::CannotInstantiateClientAtBlock)?; - let block_body = self + let hasher = at_tx_block.client.hasher(); + + let block_body = at_tx_block .client .backend() .block_body(self.block_ref.hash()) @@ -324,15 +328,16 @@ impl<'atblock, T: Config, C: OnlineClientAtBlockT> TransactionInBlock<'atbloc transaction_hash: self.ext_hash.into(), })?; - let events = ExtrinsicEvents::fetch(self.client, self.extrinsic_hash(), extrinsic_index) - .await - .map_err( - |e| TransactionEventsError::CannotFetchEventsForTransaction { - block_hash: self.block_hash().into(), - transaction_hash: self.ext_hash.into(), - error: e, - }, - )?; + let events = + ExtrinsicEvents::fetch(&at_tx_block.client, self.extrinsic_hash(), extrinsic_index) + .await + .map_err( + |e| TransactionEventsError::CannotFetchEventsForTransaction { + block_hash: self.block_hash().into(), + transaction_hash: self.ext_hash.into(), + error: e, + }, + )?; Ok(events) } diff --git a/new/src/transactions/validation_result.rs b/subxt/src/transactions/validation_result.rs similarity index 100% rename from new/src/transactions/validation_result.rs rename to subxt/src/transactions/validation_result.rs diff --git a/subxt/src/tx/mod.rs b/subxt/src/tx/mod.rs deleted file mode 100644 index f15a027f99..0000000000 --- a/subxt/src/tx/mod.rs +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! Create and submit extrinsics. -//! -//! 
An extrinsic is submitted with an "signed extra" and "additional" parameters, which can be -//! different for each chain. The trait [`crate::config::ExtrinsicParams`] determines exactly which -//! additional and signed extra parameters are used when constructing an extrinsic, and is a part -//! of the chain configuration (see [`crate::config::Config`]). - -mod tx_client; -mod tx_progress; - -pub use subxt_core::tx::payload::{DefaultPayload, DynamicPayload, Payload, dynamic}; -pub use subxt_core::tx::signer::{self, Signer}; -pub use tx_client::{ - DefaultParams, PartialTransaction, SubmittableTransaction, TransactionInvalid, - TransactionUnknown, TxClient, ValidationResult, -}; -pub use tx_progress::{TxInBlock, TxProgress, TxStatus}; diff --git a/subxt/src/tx/tx_client.rs b/subxt/src/tx/tx_client.rs deleted file mode 100644 index 030846a998..0000000000 --- a/subxt/src/tx/tx_client.rs +++ /dev/null @@ -1,997 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use crate::{ - backend::{BackendExt, BlockRef, TransactionStatus}, - client::{OfflineClientT, OnlineClientT}, - config::{Config, ExtrinsicParams, HashFor, Header}, - error::{ExtrinsicError, TransactionStatusError}, - tx::{Payload, Signer as SignerT, TxProgress}, - utils::PhantomDataSendSync, -}; -use codec::{Compact, Decode, Encode}; -use derive_where::derive_where; -use futures::future::{TryFutureExt, try_join}; -use subxt_core::tx::TransactionVersion; - -/// A client for working with transactions. -#[derive_where(Clone; Client)] -pub struct TxClient { - client: Client, - _marker: PhantomDataSendSync, -} - -impl TxClient { - /// Create a new [`TxClient`] - pub fn new(client: Client) -> Self { - Self { - client, - _marker: PhantomDataSendSync::new(), - } - } -} - -impl> TxClient { - /// Run the validation logic against some transaction you'd like to submit. 
Returns `Ok(())` - /// if the call is valid (or if it's not possible to check since the call has no validation hash). - /// Return an error if the call was not valid or something went wrong trying to validate it (ie - /// the pallet or call in question do not exist at all). - pub fn validate(&self, call: &Call) -> Result<(), ExtrinsicError> - where - Call: Payload, - { - subxt_core::tx::validate(call, &self.client.metadata()).map_err(Into::into) - } - - /// Return the SCALE encoded bytes representing the call data of the transaction. - pub fn call_data(&self, call: &Call) -> Result, ExtrinsicError> - where - Call: Payload, - { - subxt_core::tx::call_data(call, &self.client.metadata()).map_err(Into::into) - } - - /// Creates an unsigned transaction without submitting it. Depending on the metadata, we might end - /// up constructing either a v4 or v5 transaction. See [`Self::create_v4_unsigned`] or - /// [`Self::create_v5_bare`] if you'd like to explicitly create an unsigned transaction of a certain version. - pub fn create_unsigned( - &self, - call: &Call, - ) -> Result, ExtrinsicError> - where - Call: Payload, - { - let metadata = self.client.metadata(); - let tx = match subxt_core::tx::suggested_version(&metadata)? { - TransactionVersion::V4 => subxt_core::tx::create_v4_unsigned(call, &metadata), - TransactionVersion::V5 => subxt_core::tx::create_v5_bare(call, &metadata), - }?; - - Ok(SubmittableTransaction { - client: self.client.clone(), - inner: tx, - }) - } - - /// Creates a v4 unsigned (no signature or transaction extensions) transaction without submitting it. - /// - /// Prefer [`Self::create_unsigned()`] if you don't know which version to create; this will pick the - /// most suitable one for the given chain. 
- pub fn create_v4_unsigned( - &self, - call: &Call, - ) -> Result, ExtrinsicError> - where - Call: Payload, - { - let metadata = self.client.metadata(); - let tx = subxt_core::tx::create_v4_unsigned(call, &metadata)?; - - Ok(SubmittableTransaction { - client: self.client.clone(), - inner: tx, - }) - } - - /// Creates a v5 "bare" (no signature or transaction extensions) transaction without submitting it. - /// - /// Prefer [`Self::create_unsigned()`] if you don't know which version to create; this will pick the - /// most suitable one for the given chain. - pub fn create_v5_bare( - &self, - call: &Call, - ) -> Result, ExtrinsicError> - where - Call: Payload, - { - let metadata = self.client.metadata(); - let tx = subxt_core::tx::create_v5_bare(call, &metadata)?; - - Ok(SubmittableTransaction { - client: self.client.clone(), - inner: tx, - }) - } - - /// Create a partial transaction. Depending on the metadata, we might end up constructing either a v4 or - /// v5 transaction. See [`subxt_core::tx`] if you'd like to manually pick the version to construct - /// - /// Note: if not provided, the default account nonce will be set to 0 and the default mortality will be _immortal_. - /// This is because this method runs offline, and so is unable to fetch the data needed for more appropriate values. - pub fn create_partial_offline( - &self, - call: &Call, - params: >::Params, - ) -> Result, ExtrinsicError> - where - Call: Payload, - { - let metadata = self.client.metadata(); - let tx = match subxt_core::tx::suggested_version(&metadata)? { - TransactionVersion::V4 => PartialTransactionInner::V4( - subxt_core::tx::create_v4_signed(call, &self.client.client_state(), params)?, - ), - TransactionVersion::V5 => PartialTransactionInner::V5( - subxt_core::tx::create_v5_general(call, &self.client.client_state(), params)?, - ), - }; - - Ok(PartialTransaction { - client: self.client.clone(), - inner: tx, - }) - } - - /// Create a v4 partial transaction, ready to sign. 
- /// - /// Note: if not provided, the default account nonce will be set to 0 and the default mortality will be _immortal_. - /// This is because this method runs offline, and so is unable to fetch the data needed for more appropriate values. - /// - /// Prefer [`Self::create_partial_offline()`] if you don't know which version to create; this will pick the - /// most suitable one for the given chain. - pub fn create_v4_partial_offline( - &self, - call: &Call, - params: >::Params, - ) -> Result, ExtrinsicError> - where - Call: Payload, - { - let tx = PartialTransactionInner::V4(subxt_core::tx::create_v4_signed( - call, - &self.client.client_state(), - params, - )?); - - Ok(PartialTransaction { - client: self.client.clone(), - inner: tx, - }) - } - - /// Create a v5 partial transaction, ready to sign. - /// - /// Note: if not provided, the default account nonce will be set to 0 and the default mortality will be _immortal_. - /// This is because this method runs offline, and so is unable to fetch the data needed for more appropriate values. - /// - /// Prefer [`Self::create_partial_offline()`] if you don't know which version to create; this will pick the - /// most suitable one for the given chain. - pub fn create_v5_partial_offline( - &self, - call: &Call, - params: >::Params, - ) -> Result, ExtrinsicError> - where - Call: Payload, - { - let tx = PartialTransactionInner::V5(subxt_core::tx::create_v5_general( - call, - &self.client.client_state(), - params, - )?); - - Ok(PartialTransaction { - client: self.client.clone(), - inner: tx, - }) - } -} - -impl TxClient -where - T: Config, - C: OnlineClientT, -{ - /// Get the account nonce for a given account ID. 
- pub async fn account_nonce(&self, account_id: &T::AccountId) -> Result { - let block_ref = self - .client - .backend() - .latest_finalized_block_ref() - .await - .map_err(ExtrinsicError::CannotGetLatestFinalizedBlock)?; - - crate::blocks::get_account_nonce(&self.client, account_id, block_ref.hash()) - .await - .map_err(|e| ExtrinsicError::AccountNonceError { - block_hash: block_ref.hash().into(), - account_id: account_id.encode().into(), - reason: e, - }) - } - - /// Creates a partial transaction, without submitting it. This can then be signed and submitted. - pub async fn create_partial( - &self, - call: &Call, - account_id: &T::AccountId, - mut params: >::Params, - ) -> Result, ExtrinsicError> - where - Call: Payload, - { - inject_account_nonce_and_block(&self.client, account_id, &mut params).await?; - self.create_partial_offline(call, params) - } - - /// Creates a partial V4 transaction, without submitting it. This can then be signed and submitted. - /// - /// Prefer [`Self::create_partial()`] if you don't know which version to create; this will pick the - /// most suitable one for the given chain. - pub async fn create_v4_partial( - &self, - call: &Call, - account_id: &T::AccountId, - mut params: >::Params, - ) -> Result, ExtrinsicError> - where - Call: Payload, - { - inject_account_nonce_and_block(&self.client, account_id, &mut params).await?; - self.create_v4_partial_offline(call, params) - } - - /// Creates a partial V5 transaction, without submitting it. This can then be signed and submitted. - /// - /// Prefer [`Self::create_partial()`] if you don't know which version to create; this will pick the - /// most suitable one for the given chain. 
- pub async fn create_v5_partial( - &self, - call: &Call, - account_id: &T::AccountId, - mut params: >::Params, - ) -> Result, ExtrinsicError> - where - Call: Payload, - { - inject_account_nonce_and_block(&self.client, account_id, &mut params).await?; - self.create_v5_partial_offline(call, params) - } - - /// Creates a signed transaction, without submitting it. - pub async fn create_signed( - &mut self, - call: &Call, - signer: &Signer, - params: >::Params, - ) -> Result, ExtrinsicError> - where - Call: Payload, - Signer: SignerT, - { - let mut partial = self - .create_partial(call, &signer.account_id(), params) - .await?; - - Ok(partial.sign(signer)) - } - - /// Creates and signs an transaction and submits it to the chain. Passes default parameters - /// to construct the "signed extra" and "additional" payloads needed by the transaction. - /// - /// Returns a [`TxProgress`], which can be used to track the status of the transaction - /// and obtain details about it, once it has made it into a block. - pub async fn sign_and_submit_then_watch_default( - &mut self, - call: &Call, - signer: &Signer, - ) -> Result, ExtrinsicError> - where - Call: Payload, - Signer: SignerT, - >::Params: DefaultParams, - { - self.sign_and_submit_then_watch(call, signer, DefaultParams::default_params()) - .await - } - - /// Creates and signs an transaction and submits it to the chain. - /// - /// Returns a [`TxProgress`], which can be used to track the status of the transaction - /// and obtain details about it, once it has made it into a block. - pub async fn sign_and_submit_then_watch( - &mut self, - call: &Call, - signer: &Signer, - params: >::Params, - ) -> Result, ExtrinsicError> - where - Call: Payload, - Signer: SignerT, - { - self.create_signed(call, signer, params) - .await? - .submit_and_watch() - .await - } - - /// Creates and signs an transaction and submits to the chain for block inclusion. 
Passes - /// default parameters to construct the "signed extra" and "additional" payloads needed - /// by the transaction. - /// - /// Returns `Ok` with the transaction hash if it is valid transaction. - /// - /// # Note - /// - /// Success does not mean the transaction has been included in the block, just that it is valid - /// and has been included in the transaction pool. - pub async fn sign_and_submit_default( - &mut self, - call: &Call, - signer: &Signer, - ) -> Result, ExtrinsicError> - where - Call: Payload, - Signer: SignerT, - >::Params: DefaultParams, - { - self.sign_and_submit(call, signer, DefaultParams::default_params()) - .await - } - - /// Creates and signs an transaction and submits to the chain for block inclusion. - /// - /// Returns `Ok` with the transaction hash if it is valid transaction. - /// - /// # Note - /// - /// Success does not mean the transaction has been included in the block, just that it is valid - /// and has been included in the transaction pool. - pub async fn sign_and_submit( - &mut self, - call: &Call, - signer: &Signer, - params: >::Params, - ) -> Result, ExtrinsicError> - where - Call: Payload, - Signer: SignerT, - { - self.create_signed(call, signer, params) - .await? - .submit() - .await - } -} - -/// This payload contains the information needed to produce an transaction. -pub struct PartialTransaction { - client: C, - inner: PartialTransactionInner, -} - -enum PartialTransactionInner { - V4(subxt_core::tx::PartialTransactionV4), - V5(subxt_core::tx::PartialTransactionV5), -} - -impl PartialTransaction -where - T: Config, - C: OfflineClientT, -{ - /// Return the signer payload for this transaction. These are the bytes that must - /// be signed in order to produce a valid signature for the transaction. 
- pub fn signer_payload(&self) -> Vec { - match &self.inner { - PartialTransactionInner::V4(tx) => tx.signer_payload(), - PartialTransactionInner::V5(tx) => tx.signer_payload().to_vec(), - } - } - - /// Return the bytes representing the call data for this partially constructed - /// transaction. - pub fn call_data(&self) -> &[u8] { - match &self.inner { - PartialTransactionInner::V4(tx) => tx.call_data(), - PartialTransactionInner::V5(tx) => tx.call_data(), - } - } - - /// Convert this [`PartialTransaction`] into a [`SubmittableTransaction`], ready to submit. - /// The provided `signer` is responsible for providing the "from" address for the transaction, - /// as well as providing a signature to attach to it. - pub fn sign(&mut self, signer: &Signer) -> SubmittableTransaction - where - Signer: SignerT, - { - let tx = match &mut self.inner { - PartialTransactionInner::V4(tx) => tx.sign(signer), - PartialTransactionInner::V5(tx) => tx.sign(signer), - }; - - SubmittableTransaction { - client: self.client.clone(), - inner: tx, - } - } - - /// Convert this [`PartialTransaction`] into a [`SubmittableTransaction`], ready to submit. - /// An address, and something representing a signature that can be SCALE encoded, are both - /// needed in order to construct it. If you have a `Signer` to hand, you can use - /// [`PartialTransaction::sign()`] instead. - pub fn sign_with_account_and_signature( - &mut self, - account_id: &T::AccountId, - signature: &T::Signature, - ) -> SubmittableTransaction { - let tx = match &mut self.inner { - PartialTransactionInner::V4(tx) => { - tx.sign_with_account_and_signature(account_id.clone(), signature) - } - PartialTransactionInner::V5(tx) => { - tx.sign_with_account_and_signature(account_id, signature) - } - }; - - SubmittableTransaction { - client: self.client.clone(), - inner: tx, - } - } -} - -/// This represents an transaction that has been signed and is ready to submit. 
-pub struct SubmittableTransaction { - client: C, - inner: subxt_core::tx::Transaction, -} - -impl SubmittableTransaction -where - T: Config, - C: OfflineClientT, -{ - /// Create a [`SubmittableTransaction`] from some already-signed and prepared - /// transaction bytes, and some client (anything implementing [`OfflineClientT`] - /// or [`OnlineClientT`]). - /// - /// Prefer to use [`TxClient`] to create and sign transactions. This is simply - /// exposed in case you want to skip this process and submit something you've - /// already created. - pub fn from_bytes(client: C, tx_bytes: Vec) -> Self { - Self { - client, - inner: subxt_core::tx::Transaction::from_bytes(tx_bytes), - } - } - - /// Calculate and return the hash of the transaction, based on the configured hasher. - pub fn hash(&self) -> HashFor { - self.inner.hash_with(self.client.hasher()) - } - - /// Returns the SCALE encoded transaction bytes. - pub fn encoded(&self) -> &[u8] { - self.inner.encoded() - } - - /// Consumes [`SubmittableTransaction`] and returns the SCALE encoded - /// transaction bytes. - pub fn into_encoded(self) -> Vec { - self.inner.into_encoded() - } -} - -impl SubmittableTransaction -where - T: Config, - C: OnlineClientT, -{ - /// Submits the transaction to the chain. - /// - /// Returns a [`TxProgress`], which can be used to track the status of the transaction - /// and obtain details about it, once it has made it into a block. - pub async fn submit_and_watch(&self) -> Result, ExtrinsicError> { - // Get a hash of the transaction (we'll need this later). - let ext_hash = self.hash(); - - // Submit and watch for transaction progress. - let sub = self - .client - .backend() - .submit_transaction(self.encoded()) - .await - .map_err(ExtrinsicError::ErrorSubmittingTransaction)?; - - Ok(TxProgress::new(sub, self.client.clone(), ext_hash)) - } - - /// Submits the transaction to the chain for block inclusion. 
- /// - /// It's usually better to call `submit_and_watch` to get an idea of the progress of the - /// submission and whether it's eventually successful or not. This call does not guarantee - /// success, and is just sending the transaction to the chain. - pub async fn submit(&self) -> Result, ExtrinsicError> { - let ext_hash = self.hash(); - let mut sub = self - .client - .backend() - .submit_transaction(self.encoded()) - .await - .map_err(ExtrinsicError::ErrorSubmittingTransaction)?; - - // If we get a bad status or error back straight away then error, else return the hash. - match sub.next().await { - Some(Ok(status)) => match status { - TransactionStatus::Validated - | TransactionStatus::Broadcasted - | TransactionStatus::InBestBlock { .. } - | TransactionStatus::NoLongerInBestBlock - | TransactionStatus::InFinalizedBlock { .. } => Ok(ext_hash), - TransactionStatus::Error { message } => Err( - ExtrinsicError::TransactionStatusError(TransactionStatusError::Error(message)), - ), - TransactionStatus::Invalid { message } => { - Err(ExtrinsicError::TransactionStatusError( - TransactionStatusError::Invalid(message), - )) - } - TransactionStatus::Dropped { message } => { - Err(ExtrinsicError::TransactionStatusError( - TransactionStatusError::Dropped(message), - )) - } - }, - Some(Err(e)) => Err(ExtrinsicError::TransactionStatusStreamError(e)), - None => Err(ExtrinsicError::UnexpectedEndOfTransactionStatusStream), - } - } - - /// Validate a transaction by submitting it to the relevant Runtime API. A transaction that is - /// valid can be added to a block, but may still end up in an error state. - /// - /// Returns `Ok` with a [`ValidationResult`], which is the result of attempting to dry run the transaction. 
- pub async fn validate(&self) -> Result { - let latest_block_ref = self - .client - .backend() - .latest_finalized_block_ref() - .await - .map_err(ExtrinsicError::CannotGetLatestFinalizedBlock)?; - self.validate_at(latest_block_ref).await - } - - /// Validate a transaction by submitting it to the relevant Runtime API. A transaction that is - /// valid can be added to a block, but may still end up in an error state. - /// - /// Returns `Ok` with a [`ValidationResult`], which is the result of attempting to dry run the transaction. - pub async fn validate_at( - &self, - at: impl Into>>, - ) -> Result { - let block_hash = at.into().hash(); - - // Approach taken from https://github.com/paritytech/json-rpc-interface-spec/issues/55. - let mut params = Vec::with_capacity(8 + self.encoded().len() + 8); - 2u8.encode_to(&mut params); - params.extend(self.encoded().iter()); - block_hash.encode_to(&mut params); - - let res: Vec = self - .client - .backend() - .call( - "TaggedTransactionQueue_validate_transaction", - Some(¶ms), - block_hash, - ) - .await - .map_err(ExtrinsicError::CannotGetValidationInfo)?; - - ValidationResult::try_from_bytes(res) - } - - /// This returns an estimate for what the transaction is expected to cost to execute, less any tips. - /// The actual amount paid can vary from block to block based on node traffic and other factors. 
- pub async fn partial_fee_estimate(&self) -> Result { - let mut params = self.encoded().to_vec(); - (self.encoded().len() as u32).encode_to(&mut params); - let latest_block_ref = self - .client - .backend() - .latest_finalized_block_ref() - .await - .map_err(ExtrinsicError::CannotGetLatestFinalizedBlock)?; - - // destructuring RuntimeDispatchInfo, see type information - // data layout: {weight_ref_time: Compact, weight_proof_size: Compact, class: u8, partial_fee: u128} - let (_, _, _, partial_fee) = self - .client - .backend() - .call_decoding::<(Compact, Compact, u8, u128)>( - "TransactionPaymentApi_query_info", - Some(¶ms), - latest_block_ref.hash(), - ) - .await - .map_err(ExtrinsicError::CannotGetFeeInfo)?; - - Ok(partial_fee) - } -} - -/// Fetch the latest block header and account nonce from the backend and use them to refine [`ExtrinsicParams::Params`]. -async fn inject_account_nonce_and_block>( - client: &Client, - account_id: &T::AccountId, - params: &mut >::Params, -) -> Result<(), ExtrinsicError> { - use subxt_core::config::transaction_extensions::Params; - - let block_ref = client - .backend() - .latest_finalized_block_ref() - .await - .map_err(ExtrinsicError::CannotGetLatestFinalizedBlock)?; - - let (block_header, account_nonce) = try_join( - client - .backend() - .block_header(block_ref.hash()) - .map_err(ExtrinsicError::CannotGetLatestFinalizedBlock), - crate::blocks::get_account_nonce(client, account_id, block_ref.hash()).map_err(|e| { - ExtrinsicError::AccountNonceError { - block_hash: block_ref.hash().into(), - account_id: account_id.encode().into(), - reason: e, - } - }), - ) - .await?; - - let block_header = block_header.ok_or_else(|| ExtrinsicError::CannotFindBlockHeader { - block_hash: block_ref.hash().into(), - })?; - - params.inject_account_nonce(account_nonce); - params.inject_block(block_header.number().into(), block_ref.hash()); - - Ok(()) -} - -impl ValidationResult { - #[allow(clippy::get_first)] - fn try_from_bytes(bytes: Vec) -> 
Result { - // TaggedTransactionQueue_validate_transaction returns this: - // https://github.com/paritytech/substrate/blob/0cdf7029017b70b7c83c21a4dc0aa1020e7914f6/primitives/runtime/src/transaction_validity.rs#L210 - // We copy some of the inner types and put the three states (valid, invalid, unknown) into one enum, - // because from our perspective, the call was successful regardless. - if bytes.get(0) == Some(&0) { - // ok: valid. Decode but, for now we discard most of the information - let res = TransactionValid::decode(&mut &bytes[1..]) - .map_err(ExtrinsicError::CannotDecodeValidationResult)?; - Ok(ValidationResult::Valid(res)) - } else if bytes.get(0) == Some(&1) && bytes.get(1) == Some(&0) { - // error: invalid - let res = TransactionInvalid::decode(&mut &bytes[2..]) - .map_err(ExtrinsicError::CannotDecodeValidationResult)?; - Ok(ValidationResult::Invalid(res)) - } else if bytes.get(0) == Some(&1) && bytes.get(1) == Some(&1) { - // error: unknown - let res = TransactionUnknown::decode(&mut &bytes[2..]) - .map_err(ExtrinsicError::CannotDecodeValidationResult)?; - Ok(ValidationResult::Unknown(res)) - } else { - // unable to decode the bytes; they aren't what we expect. - Err(ExtrinsicError::UnexpectedValidationResultBytes(bytes)) - } - } -} - -/// The result of performing [`SubmittableTransaction::validate()`]. -#[derive(Clone, Debug, PartialEq)] -pub enum ValidationResult { - /// The transaction is valid - Valid(TransactionValid), - /// The transaction is invalid - Invalid(TransactionInvalid), - /// Unable to validate the transaction - Unknown(TransactionUnknown), -} - -impl ValidationResult { - /// Is the transaction valid. - pub fn is_valid(&self) -> bool { - matches!(self, ValidationResult::Valid(_)) - } -} - -/// Transaction is valid; here is some more information about it. -#[derive(Decode, Clone, Debug, PartialEq)] -pub struct TransactionValid { - /// Priority of the transaction. 
- /// - /// Priority determines the ordering of two transactions that have all - /// their dependencies (required tags) satisfied. - pub priority: u64, - /// Transaction dependencies - /// - /// A non-empty list signifies that some other transactions which provide - /// given tags are required to be included before that one. - pub requires: Vec>, - /// Provided tags - /// - /// A list of tags this transaction provides. Successfully importing the transaction - /// will enable other transactions that depend on (require) those tags to be included as well. - /// Provided and required tags allow Substrate to build a dependency graph of transactions - /// and import them in the right (linear) order. - pub provides: Vec>, - /// Transaction longevity - /// - /// Longevity describes minimum number of blocks the validity is correct. - /// After this period transaction should be removed from the pool or revalidated. - pub longevity: u64, - /// A flag indicating if the transaction should be propagated to other peers. - /// - /// By setting `false` here the transaction will still be considered for - /// including in blocks that are authored on the current node, but will - /// never be sent to other peers. - pub propagate: bool, -} - -/// The runtime was unable to validate the transaction. -#[derive(Decode, Clone, Debug, PartialEq)] -pub enum TransactionUnknown { - /// Could not lookup some information that is required to validate the transaction. - CannotLookup, - /// No validator found for the given unsigned transaction. - NoUnsignedValidator, - /// Any other custom unknown validity that is not covered by this enum. - Custom(u8), -} - -/// The transaction is invalid. -#[derive(Decode, Clone, Debug, PartialEq)] -pub enum TransactionInvalid { - /// The call of the transaction is not expected. - Call, - /// General error to do with the inability to pay some fees (e.g. account balance too low). - Payment, - /// General error to do with the transaction not yet being valid (e.g. 
nonce too high). - Future, - /// General error to do with the transaction being outdated (e.g. nonce too low). - Stale, - /// General error to do with the transaction's proofs (e.g. signature). - /// - /// # Possible causes - /// - /// When using a signed extension that provides additional data for signing, it is required - /// that the signing and the verifying side use the same additional data. Additional - /// data will only be used to generate the signature, but will not be part of the transaction - /// itself. As the verifying side does not know which additional data was used while signing - /// it will only be able to assume a bad signature and cannot express a more meaningful error. - BadProof, - /// The transaction birth block is ancient. - /// - /// # Possible causes - /// - /// For `FRAME`-based runtimes this would be caused by `current block number` - /// - Era::birth block number > BlockHashCount`. (e.g. in Polkadot `BlockHashCount` = 2400, so - /// a transaction with birth block number 1337 would be valid up until block number 1337 + 2400, - /// after which point the transaction would be considered to have an ancient birth block.) - AncientBirthBlock, - /// The transaction would exhaust the resources of current block. - /// - /// The transaction might be valid, but there are not enough resources - /// left in the current block. - ExhaustsResources, - /// Any other custom invalid validity that is not covered by this enum. - Custom(u8), - /// An transaction with a Mandatory dispatch resulted in Error. This is indicative of either a - /// malicious validator or a buggy `provide_inherent`. In any case, it can result in - /// dangerously overweight blocks and therefore if found, invalidates the block. - BadMandatory, - /// An transaction with a mandatory dispatch tried to be validated. - /// This is invalid; only inherent transactions are allowed to have mandatory dispatches. 
- MandatoryValidation, - /// The sending address is disabled or known to be invalid. - BadSigner, -} - -/// This trait is used to create default values for extrinsic params. We use this instead of -/// [`Default`] because we want to be able to support params which are tuples of more than 12 -/// entries (which is the maximum tuple size Rust currently implements [`Default`] for on tuples), -/// given that we aren't far off having more than 12 transaction extensions already. -/// -/// If you have params which are _not_ a tuple and which you'd like to be instantiated automatically -/// when calling [`TxClient::sign_and_submit_default()`] or [`TxClient::sign_and_submit_then_watch_default()`], -/// then you'll need to implement this trait for them. -pub trait DefaultParams: Sized { - /// Instantiate a default instance of the parameters. - fn default_params() -> Self; -} - -impl DefaultParams for [P; N] { - fn default_params() -> Self { - core::array::from_fn(|_| P::default()) - } -} - -macro_rules! 
impl_default_params_for_tuple { - ($($ident:ident),+) => { - impl <$($ident : Default),+> DefaultParams for ($($ident,)+){ - fn default_params() -> Self { - ( - $($ident::default(),)+ - ) - } - } - } -} - -#[rustfmt::skip] -const _: () = { - impl_default_params_for_tuple!(A); - impl_default_params_for_tuple!(A, B); - impl_default_params_for_tuple!(A, B, C); - impl_default_params_for_tuple!(A, B, C, D); - impl_default_params_for_tuple!(A, B, C, D, E); - impl_default_params_for_tuple!(A, B, C, D, E, F); - impl_default_params_for_tuple!(A, B, C, D, E, F, G); - impl_default_params_for_tuple!(A, B, C, D, E, F, G, H); - impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I); - impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J); - impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K); - impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L); - impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M); - impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N); - impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O); - impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P); - impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q); - impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R); - impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S); - impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T); - impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U); - impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V); - impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W); - impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X); - impl_default_params_for_tuple!(A, B, C, 
D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y); - impl_default_params_for_tuple!(A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z); -}; - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn transaction_validity_decoding_empty_bytes() { - // No panic should occur decoding empty bytes. - let decoded = ValidationResult::try_from_bytes(vec![]); - assert!(decoded.is_err()) - } - - #[test] - fn transaction_validity_decoding_is_ok() { - use sp_runtime::transaction_validity as sp; - use sp_runtime::transaction_validity::TransactionValidity as T; - - let pairs = vec![ - ( - T::Ok(sp::ValidTransaction { - ..Default::default() - }), - ValidationResult::Valid(TransactionValid { - // By default, tx is immortal - longevity: u64::MAX, - // Default is true - propagate: true, - priority: 0, - provides: vec![], - requires: vec![], - }), - ), - ( - T::Err(sp::TransactionValidityError::Invalid( - sp::InvalidTransaction::BadProof, - )), - ValidationResult::Invalid(TransactionInvalid::BadProof), - ), - ( - T::Err(sp::TransactionValidityError::Invalid( - sp::InvalidTransaction::Call, - )), - ValidationResult::Invalid(TransactionInvalid::Call), - ), - ( - T::Err(sp::TransactionValidityError::Invalid( - sp::InvalidTransaction::Payment, - )), - ValidationResult::Invalid(TransactionInvalid::Payment), - ), - ( - T::Err(sp::TransactionValidityError::Invalid( - sp::InvalidTransaction::Future, - )), - ValidationResult::Invalid(TransactionInvalid::Future), - ), - ( - T::Err(sp::TransactionValidityError::Invalid( - sp::InvalidTransaction::Stale, - )), - ValidationResult::Invalid(TransactionInvalid::Stale), - ), - ( - T::Err(sp::TransactionValidityError::Invalid( - sp::InvalidTransaction::AncientBirthBlock, - )), - ValidationResult::Invalid(TransactionInvalid::AncientBirthBlock), - ), - ( - T::Err(sp::TransactionValidityError::Invalid( - sp::InvalidTransaction::ExhaustsResources, - )), - 
ValidationResult::Invalid(TransactionInvalid::ExhaustsResources), - ), - ( - T::Err(sp::TransactionValidityError::Invalid( - sp::InvalidTransaction::BadMandatory, - )), - ValidationResult::Invalid(TransactionInvalid::BadMandatory), - ), - ( - T::Err(sp::TransactionValidityError::Invalid( - sp::InvalidTransaction::MandatoryValidation, - )), - ValidationResult::Invalid(TransactionInvalid::MandatoryValidation), - ), - ( - T::Err(sp::TransactionValidityError::Invalid( - sp::InvalidTransaction::BadSigner, - )), - ValidationResult::Invalid(TransactionInvalid::BadSigner), - ), - ( - T::Err(sp::TransactionValidityError::Invalid( - sp::InvalidTransaction::Custom(123), - )), - ValidationResult::Invalid(TransactionInvalid::Custom(123)), - ), - ( - T::Err(sp::TransactionValidityError::Unknown( - sp::UnknownTransaction::CannotLookup, - )), - ValidationResult::Unknown(TransactionUnknown::CannotLookup), - ), - ( - T::Err(sp::TransactionValidityError::Unknown( - sp::UnknownTransaction::NoUnsignedValidator, - )), - ValidationResult::Unknown(TransactionUnknown::NoUnsignedValidator), - ), - ( - T::Err(sp::TransactionValidityError::Unknown( - sp::UnknownTransaction::Custom(123), - )), - ValidationResult::Unknown(TransactionUnknown::Custom(123)), - ), - ]; - - for (sp, validation_result) in pairs { - let encoded = sp.encode(); - let decoded = ValidationResult::try_from_bytes(encoded).expect("should decode OK"); - assert_eq!(decoded, validation_result); - } - } -} diff --git a/subxt/src/tx/tx_progress.rs b/subxt/src/tx/tx_progress.rs deleted file mode 100644 index 83126dcd3e..0000000000 --- a/subxt/src/tx/tx_progress.rs +++ /dev/null @@ -1,465 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! Types representing extrinsics/transactions that have been submitted to a node. 
- -use std::task::Poll; - -use crate::{ - backend::{BlockRef, StreamOfResults, TransactionStatus as BackendTxStatus}, - client::OnlineClientT, - config::{Config, HashFor}, - error::{ - DispatchError, TransactionEventsError, TransactionFinalizedSuccessError, - TransactionProgressError, TransactionStatusError, - }, - events::EventsClient, - utils::strip_compact_prefix, -}; -use derive_where::derive_where; -use futures::{Stream, StreamExt}; - -/// This struct represents a subscription to the progress of some transaction. -pub struct TxProgress { - sub: Option>>>, - ext_hash: HashFor, - client: C, -} - -impl std::fmt::Debug for TxProgress { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("TxProgress") - .field("sub", &"") - .field("ext_hash", &self.ext_hash) - .field("client", &"") - .finish() - } -} - -// The above type is not `Unpin` by default unless the generic param `T` is, -// so we manually make it clear that Unpin is actually fine regardless of `T` -// (we don't care if this moves around in memory while it's "pinned"). -impl Unpin for TxProgress {} - -impl TxProgress { - /// Instantiate a new [`TxProgress`] from a custom subscription. - pub fn new( - sub: StreamOfResults>>, - client: C, - ext_hash: HashFor, - ) -> Self { - Self { - sub: Some(sub), - client, - ext_hash, - } - } - - /// Return the hash of the extrinsic. - pub fn extrinsic_hash(&self) -> HashFor { - self.ext_hash - } -} - -impl TxProgress -where - T: Config, - C: OnlineClientT, -{ - /// Return the next transaction status when it's emitted. This just delegates to the - /// [`futures::Stream`] implementation for [`TxProgress`], but allows you to - /// avoid importing that trait if you don't otherwise need it. 
- pub async fn next(&mut self) -> Option, TransactionProgressError>> { - StreamExt::next(self).await - } - - /// Wait for the transaction to be finalized, and return a [`TxInBlock`] - /// instance when it is, or an error if there was a problem waiting for finalization. - /// - /// **Note:** consumes `self`. If you'd like to perform multiple actions as the state of the - /// transaction progresses, use [`TxProgress::next()`] instead. - /// - /// **Note:** transaction statuses like `Invalid`/`Usurped`/`Dropped` indicate with some - /// probability that the transaction will not make it into a block but there is no guarantee - /// that this is true. In those cases the stream is closed however, so you currently have no way to find - /// out if they finally made it into a block or not. - pub async fn wait_for_finalized(mut self) -> Result, TransactionProgressError> { - while let Some(status) = self.next().await { - match status? { - // Finalized! Return. - TxStatus::InFinalizedBlock(s) => return Ok(s), - // Error scenarios; return the error. - TxStatus::Error { message } => { - return Err(TransactionStatusError::Error(message).into()); - } - TxStatus::Invalid { message } => { - return Err(TransactionStatusError::Invalid(message).into()); - } - TxStatus::Dropped { message } => { - return Err(TransactionStatusError::Dropped(message).into()); - } - // Ignore and wait for next status event: - _ => continue, - } - } - Err(TransactionProgressError::UnexpectedEndOfTransactionStatusStream) - } - - /// Wait for the transaction to be finalized, and for the transaction events to indicate - /// that the transaction was successful. Returns the events associated with the transaction, - /// as well as a couple of other details (block hash and extrinsic hash). - /// - /// **Note:** consumes self. If you'd like to perform multiple actions as progress is made, - /// use [`TxProgress::next()`] instead. 
- /// - /// **Note:** transaction statuses like `Invalid`/`Usurped`/`Dropped` indicate with some - /// probability that the transaction will not make it into a block but there is no guarantee - /// that this is true. In those cases the stream is closed however, so you currently have no way to find - /// out if they finally made it into a block or not. - pub async fn wait_for_finalized_success( - self, - ) -> Result, TransactionFinalizedSuccessError> { - let evs = self.wait_for_finalized().await?.wait_for_success().await?; - Ok(evs) - } -} - -impl Stream for TxProgress { - type Item = Result, TransactionProgressError>; - - fn poll_next( - mut self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - let sub = match self.sub.as_mut() { - Some(sub) => sub, - None => return Poll::Ready(None), - }; - - sub.poll_next_unpin(cx) - .map_err(TransactionProgressError::CannotGetNextProgressUpdate) - .map_ok(|status| { - match status { - BackendTxStatus::Validated => TxStatus::Validated, - BackendTxStatus::Broadcasted => TxStatus::Broadcasted, - BackendTxStatus::NoLongerInBestBlock => TxStatus::NoLongerInBestBlock, - BackendTxStatus::InBestBlock { hash } => TxStatus::InBestBlock(TxInBlock::new( - hash, - self.ext_hash, - self.client.clone(), - )), - // These stream events mean that nothing further will be sent: - BackendTxStatus::InFinalizedBlock { hash } => { - self.sub = None; - TxStatus::InFinalizedBlock(TxInBlock::new( - hash, - self.ext_hash, - self.client.clone(), - )) - } - BackendTxStatus::Error { message } => { - self.sub = None; - TxStatus::Error { message } - } - BackendTxStatus::Invalid { message } => { - self.sub = None; - TxStatus::Invalid { message } - } - BackendTxStatus::Dropped { message } => { - self.sub = None; - TxStatus::Dropped { message } - } - } - }) - } -} - -/// Possible transaction statuses returned from our [`TxProgress::next()`] call. 
-#[derive_where(Debug; C)] -pub enum TxStatus { - /// Transaction is part of the future queue. - Validated, - /// The transaction has been broadcast to other nodes. - Broadcasted, - /// Transaction is no longer in a best block. - NoLongerInBestBlock, - /// Transaction has been included in block with given hash. - InBestBlock(TxInBlock), - /// Transaction has been finalized by a finality-gadget, e.g GRANDPA - InFinalizedBlock(TxInBlock), - /// Something went wrong in the node. - Error { - /// Human readable message; what went wrong. - message: String, - }, - /// Transaction is invalid (bad nonce, signature etc). - Invalid { - /// Human readable message; why was it invalid. - message: String, - }, - /// The transaction was dropped. - Dropped { - /// Human readable message; why was it dropped. - message: String, - }, -} - -impl TxStatus { - /// A convenience method to return the finalized details. Returns - /// [`None`] if the enum variant is not [`TxStatus::InFinalizedBlock`]. - pub fn as_finalized(&self) -> Option<&TxInBlock> { - match self { - Self::InFinalizedBlock(val) => Some(val), - _ => None, - } - } - - /// A convenience method to return the best block details. Returns - /// [`None`] if the enum variant is not [`TxStatus::InBestBlock`]. - pub fn as_in_block(&self) -> Option<&TxInBlock> { - match self { - Self::InBestBlock(val) => Some(val), - _ => None, - } - } -} - -/// This struct represents a transaction that has made it into a block. -#[derive_where(Debug; C)] -pub struct TxInBlock { - block_ref: BlockRef>, - ext_hash: HashFor, - client: C, -} - -impl TxInBlock { - pub(crate) fn new(block_ref: BlockRef>, ext_hash: HashFor, client: C) -> Self { - Self { - block_ref, - ext_hash, - client, - } - } - - /// Return the hash of the block that the transaction has made it into. - pub fn block_hash(&self) -> HashFor { - self.block_ref.hash() - } - - /// Return the hash of the extrinsic that was submitted. 
- pub fn extrinsic_hash(&self) -> HashFor { - self.ext_hash - } -} - -impl> TxInBlock { - /// Fetch the events associated with this transaction. If the transaction - /// was successful (ie no `ExtrinsicFailed`) events were found, then we return - /// the events associated with it. If the transaction was not successful, or - /// something else went wrong, we return an error. - /// - /// **Note:** If multiple `ExtrinsicFailed` errors are returned (for instance - /// because a pallet chooses to emit one as an event, which is considered - /// abnormal behaviour), it is not specified which of the errors is returned here. - /// You can use [`TxInBlock::fetch_events`] instead if you'd like to - /// work with multiple "error" events. - /// - /// **Note:** This has to download block details from the node and decode events - /// from them. - pub async fn wait_for_success( - &self, - ) -> Result, TransactionEventsError> { - let events = self.fetch_events().await?; - - // Try to find any errors; return the first one we encounter. - for (ev_idx, ev) in events.iter().enumerate() { - let ev = ev.map_err(|e| TransactionEventsError::CannotDecodeEventInBlock { - event_index: ev_idx, - block_hash: self.block_hash().into(), - error: e, - })?; - - if ev.pallet_name() == "System" && ev.variant_name() == "ExtrinsicFailed" { - let dispatch_error = - DispatchError::decode_from(ev.field_bytes(), self.client.metadata()).map_err( - |e| TransactionEventsError::CannotDecodeDispatchError { - error: e, - bytes: ev.field_bytes().to_vec(), - }, - )?; - return Err(dispatch_error.into()); - } - } - - Ok(events) - } - - /// Fetch all of the events associated with this transaction. This succeeds whether - /// the transaction was a success or not; it's up to you to handle the error and - /// success events however you prefer. - /// - /// **Note:** This has to download block details from the node and decode events - /// from them. 
- pub async fn fetch_events( - &self, - ) -> Result, TransactionEventsError> { - let hasher = self.client.hasher(); - - let block_body = self - .client - .backend() - .block_body(self.block_ref.hash()) - .await - .map_err(|e| TransactionEventsError::CannotFetchBlockBody { - block_hash: self.block_hash().into(), - error: e, - })? - .ok_or_else(|| TransactionEventsError::BlockNotFound { - block_hash: self.block_hash().into(), - })?; - - let extrinsic_idx = block_body - .iter() - .position(|ext| { - use crate::config::Hasher; - let Ok((_, stripped)) = strip_compact_prefix(ext) else { - return false; - }; - let hash = hasher.hash_of(&stripped); - hash == self.ext_hash - }) - // If we successfully obtain the block hash we think contains our - // extrinsic, the extrinsic should be in there somewhere.. - .ok_or_else(|| TransactionEventsError::CannotFindTransactionInBlock { - block_hash: self.block_hash().into(), - transaction_hash: self.ext_hash.into(), - })?; - - let events = EventsClient::new(self.client.clone()) - .at(self.block_ref.clone()) - .await - .map_err( - |e| TransactionEventsError::CannotFetchEventsForTransaction { - block_hash: self.block_hash().into(), - transaction_hash: self.ext_hash.into(), - error: e, - }, - )?; - - Ok(crate::blocks::ExtrinsicEvents::new( - self.ext_hash, - extrinsic_idx as u32, - events, - )) - } -} - -#[cfg(test)] -mod test { - use super::*; - use subxt_core::client::RuntimeVersion; - - use crate::{ - SubstrateConfig, - backend::{StreamOfResults, TransactionStatus}, - client::{OfflineClientT, OnlineClientT}, - config::{Config, HashFor}, - tx::TxProgress, - }; - - type MockTxProgress = TxProgress; - type MockHash = HashFor; - type MockSubstrateTxStatus = TransactionStatus; - - /// a mock client to satisfy trait bounds in tests - #[derive(Clone, Debug)] - struct MockClient; - - impl OfflineClientT for MockClient { - fn metadata(&self) -> crate::Metadata { - unimplemented!("just a mock impl to satisfy trait bounds") - } - - fn 
genesis_hash(&self) -> MockHash { - unimplemented!("just a mock impl to satisfy trait bounds") - } - - fn runtime_version(&self) -> RuntimeVersion { - unimplemented!("just a mock impl to satisfy trait bounds") - } - - fn hasher(&self) -> ::Hasher { - unimplemented!("just a mock impl to satisfy trait bounds") - } - - fn client_state(&self) -> subxt_core::client::ClientState { - unimplemented!("just a mock impl to satisfy trait bounds") - } - } - - impl OnlineClientT for MockClient { - fn backend(&self) -> &dyn crate::backend::Backend { - unimplemented!("just a mock impl to satisfy trait bounds") - } - } - - #[tokio::test] - async fn wait_for_finalized_returns_err_when_error() { - let tx_progress = mock_tx_progress(vec![ - MockSubstrateTxStatus::Broadcasted, - MockSubstrateTxStatus::Error { - message: "err".into(), - }, - ]); - let finalized_result = tx_progress.wait_for_finalized().await; - assert!(matches!( - finalized_result, - Err(TransactionProgressError::TransactionStatusError(TransactionStatusError::Error(e))) if e == "err" - )); - } - - #[tokio::test] - async fn wait_for_finalized_returns_err_when_invalid() { - let tx_progress = mock_tx_progress(vec![ - MockSubstrateTxStatus::Broadcasted, - MockSubstrateTxStatus::Invalid { - message: "err".into(), - }, - ]); - let finalized_result = tx_progress.wait_for_finalized().await; - assert!(matches!( - finalized_result, - Err(TransactionProgressError::TransactionStatusError(TransactionStatusError::Invalid(e))) if e == "err" - )); - } - - #[tokio::test] - async fn wait_for_finalized_returns_err_when_dropped() { - let tx_progress = mock_tx_progress(vec![ - MockSubstrateTxStatus::Broadcasted, - MockSubstrateTxStatus::Dropped { - message: "err".into(), - }, - ]); - let finalized_result = tx_progress.wait_for_finalized().await; - assert!(matches!( - finalized_result, - Err(TransactionProgressError::TransactionStatusError(TransactionStatusError::Dropped(e))) if e == "err" - )); - } - - fn mock_tx_progress(statuses: Vec) -> 
MockTxProgress { - let sub = create_substrate_tx_status_subscription(statuses); - TxProgress::new(sub, MockClient, Default::default()) - } - - fn create_substrate_tx_status_subscription( - elements: Vec, - ) -> StreamOfResults { - let results = elements.into_iter().map(Ok); - let stream = Box::pin(futures::stream::iter(results)); - let sub: StreamOfResults = StreamOfResults::new(stream); - sub - } -} diff --git a/new/src/utils.rs b/subxt/src/utils.rs similarity index 98% rename from new/src/utils.rs rename to subxt/src/utils.rs index 17264ce025..660ec94b99 100644 --- a/new/src/utils.rs +++ b/subxt/src/utils.rs @@ -4,9 +4,7 @@ //! Miscellaneous utility helpers. -mod account_id; mod account_id20; -pub mod bits; mod era; mod multi_address; mod multi_signature; @@ -16,10 +14,11 @@ mod unchecked_extrinsic; mod wrapper_opaque; mod yesnomaybe; +pub mod bits; + use codec::{Compact, Decode, Encode}; use derive_where::derive_where; -pub use account_id::AccountId32; pub use account_id20::AccountId20; pub use era::Era; pub use multi_address::MultiAddress; @@ -31,6 +30,8 @@ pub use unchecked_extrinsic::UncheckedExtrinsic; pub use wrapper_opaque::WrapperKeepOpaque; pub use yesnomaybe::{Maybe, No, NoMaybe, Yes, YesMaybe, YesNo}; +pub use subxt_utils_accountid32::AccountId32; + /// Wraps an already encoded byte vector, prevents being encoded as a raw byte vector as part of /// the transaction payload #[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] diff --git a/new/src/utils/account_id20.rs b/subxt/src/utils/account_id20.rs similarity index 100% rename from new/src/utils/account_id20.rs rename to subxt/src/utils/account_id20.rs diff --git a/new/src/utils/bits.rs b/subxt/src/utils/bits.rs similarity index 100% rename from new/src/utils/bits.rs rename to subxt/src/utils/bits.rs diff --git a/new/src/utils/era.rs b/subxt/src/utils/era.rs similarity index 100% rename from new/src/utils/era.rs rename to subxt/src/utils/era.rs diff --git a/subxt/src/utils/fetch_chain_spec.rs 
b/subxt/src/utils/fetch_chain_spec.rs deleted file mode 100644 index b2881276c3..0000000000 --- a/subxt/src/utils/fetch_chain_spec.rs +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use crate::macros::{cfg_jsonrpsee_native, cfg_jsonrpsee_web}; -use serde_json::value::RawValue; - -/// Possible errors encountered trying to fetch a chain spec from an RPC node. -#[derive(thiserror::Error, Debug)] -#[allow(missing_docs)] -pub enum FetchChainspecError { - #[error("Cannot fetch chain spec: RPC error: {0}.")] - RpcError(String), - #[error("Cannot fetch chain spec: Invalid URL.")] - InvalidUrl, - #[error("Cannot fetch chain spec: Invalid URL scheme.")] - InvalidScheme, - #[error("Cannot fetch chain spec: Handshake error establishing WS connection.")] - HandshakeError, -} - -/// Fetch a chain spec from an RPC node at the given URL. -pub async fn fetch_chainspec_from_rpc_node( - url: impl AsRef, -) -> Result, FetchChainspecError> { - use jsonrpsee::core::client::{ClientT, SubscriptionClientT}; - use jsonrpsee::rpc_params; - - let client = jsonrpsee_helpers::client(url.as_ref()).await?; - - let result = client - .request("sync_state_genSyncSpec", jsonrpsee::rpc_params![true]) - .await - .map_err(|err| FetchChainspecError::RpcError(err.to_string()))?; - - // Subscribe to the finalized heads of the chain. - let mut subscription = SubscriptionClientT::subscribe::, _>( - &client, - "chain_subscribeFinalizedHeads", - rpc_params![], - "chain_unsubscribeFinalizedHeads", - ) - .await - .map_err(|err| FetchChainspecError::RpcError(err.to_string()))?; - - // We must ensure that the finalized block of the chain is not the block included - // in the chainSpec. - // This is a temporary workaround for: https://github.com/smol-dot/smoldot/issues/1562. 
- // The first finalized block that is received might by the finalized block could be the one - // included in the chainSpec. Decoding the chainSpec for this purpose is too complex. - let _ = subscription.next().await; - let _ = subscription.next().await; - - Ok(result) -} - -cfg_jsonrpsee_native! { - mod jsonrpsee_helpers { - use super::FetchChainspecError; - use tokio_util::compat::Compat; - - pub use jsonrpsee::{ - client_transport::ws::{self, EitherStream, Url, WsTransportClientBuilder}, - core::client::Client, - }; - - pub type Sender = ws::Sender>; - pub type Receiver = ws::Receiver>; - - /// Build WS RPC client from URL - pub async fn client(url: &str) -> Result { - let url = Url::parse(url).map_err(|_| FetchChainspecError::InvalidUrl)?; - - if url.scheme() != "ws" && url.scheme() != "wss" { - return Err(FetchChainspecError::InvalidScheme); - } - - let (sender, receiver) = ws_transport(url).await?; - - Ok(Client::builder() - .max_buffer_capacity_per_subscription(4096) - .build_with_tokio(sender, receiver)) - } - - async fn ws_transport(url: Url) -> Result<(Sender, Receiver), FetchChainspecError> { - WsTransportClientBuilder::default() - .build(url) - .await - .map_err(|_| FetchChainspecError::HandshakeError) - } - } -} - -cfg_jsonrpsee_web! { - mod jsonrpsee_helpers { - use super::FetchChainspecError; - pub use jsonrpsee::{ - client_transport::web, - core::client::{Client, ClientBuilder}, - }; - - /// Build web RPC client from URL - pub async fn client(url: &str) -> Result { - let (sender, receiver) = web::connect(url) - .await - .map_err(|_| FetchChainspecError::HandshakeError)?; - - Ok(ClientBuilder::default() - .max_buffer_capacity_per_subscription(4096) - .build_with_wasm(sender, receiver)) - } - } -} diff --git a/subxt/src/utils/mod.rs b/subxt/src/utils/mod.rs deleted file mode 100644 index 8d8893361d..0000000000 --- a/subxt/src/utils/mod.rs +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. 
-// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! Miscellaneous utility helpers. - -use crate::macros::cfg_jsonrpsee; - -pub use subxt_core::utils::{ - AccountId32, Encoded, Era, H160, H256, H512, KeyedVec, MultiAddress, MultiSignature, - PhantomDataSendSync, Static, UncheckedExtrinsic, WrapperKeepOpaque, Yes, bits, - strip_compact_prefix, to_hex, -}; - -pub use subxt_rpcs::utils::url_is_secure; - -cfg_jsonrpsee! { - mod fetch_chain_spec; - pub use fetch_chain_spec::{fetch_chainspec_from_rpc_node, FetchChainspecError}; -} diff --git a/new/src/utils/multi_address.rs b/subxt/src/utils/multi_address.rs similarity index 100% rename from new/src/utils/multi_address.rs rename to subxt/src/utils/multi_address.rs diff --git a/core/src/utils/multi_signature.rs b/subxt/src/utils/multi_signature.rs similarity index 100% rename from core/src/utils/multi_signature.rs rename to subxt/src/utils/multi_signature.rs diff --git a/new/src/utils/range_map.rs b/subxt/src/utils/range_map.rs similarity index 100% rename from new/src/utils/range_map.rs rename to subxt/src/utils/range_map.rs diff --git a/new/src/utils/static_type.rs b/subxt/src/utils/static_type.rs similarity index 100% rename from new/src/utils/static_type.rs rename to subxt/src/utils/static_type.rs diff --git a/new/src/utils/unchecked_extrinsic.rs b/subxt/src/utils/unchecked_extrinsic.rs similarity index 100% rename from new/src/utils/unchecked_extrinsic.rs rename to subxt/src/utils/unchecked_extrinsic.rs diff --git a/new/src/utils/wrapper_opaque.rs b/subxt/src/utils/wrapper_opaque.rs similarity index 100% rename from new/src/utils/wrapper_opaque.rs rename to subxt/src/utils/wrapper_opaque.rs diff --git a/core/src/utils/yesnomaybe.rs b/subxt/src/utils/yesnomaybe.rs similarity index 100% rename from core/src/utils/yesnomaybe.rs rename to subxt/src/utils/yesnomaybe.rs diff --git a/new/src/view_functions.rs b/subxt/src/view_functions.rs similarity index 100% rename from 
new/src/view_functions.rs rename to subxt/src/view_functions.rs diff --git a/subxt/src/view_functions/mod.rs b/subxt/src/view_functions/mod.rs deleted file mode 100644 index df095bfb09..0000000000 --- a/subxt/src/view_functions/mod.rs +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -//! Types associated with executing View Function calls. - -mod view_function_types; -mod view_functions_client; - -pub use subxt_core::view_functions::payload::{DynamicPayload, Payload, StaticPayload, dynamic}; -pub use view_function_types::ViewFunctionsApi; -pub use view_functions_client::ViewFunctionsClient; diff --git a/new/src/view_functions/payload.rs b/subxt/src/view_functions/payload.rs similarity index 100% rename from new/src/view_functions/payload.rs rename to subxt/src/view_functions/payload.rs diff --git a/subxt/src/view_functions/view_function_types.rs b/subxt/src/view_functions/view_function_types.rs deleted file mode 100644 index 9860dca78e..0000000000 --- a/subxt/src/view_functions/view_function_types.rs +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use super::Payload; -use crate::{ - backend::BlockRef, - client::OnlineClientT, - config::{Config, HashFor}, - error::ViewFunctionError, -}; -use derive_where::derive_where; -use std::{future::Future, marker::PhantomData}; - -/// Execute View Function calls. 
-#[derive_where(Clone; Client)] -pub struct ViewFunctionsApi { - client: Client, - block_ref: BlockRef>, - _marker: PhantomData, -} - -impl ViewFunctionsApi { - /// Create a new [`ViewFunctionsApi`] - pub(crate) fn new(client: Client, block_ref: BlockRef>) -> Self { - Self { - client, - block_ref, - _marker: PhantomData, - } - } -} - -impl ViewFunctionsApi -where - T: Config, - Client: OnlineClientT, -{ - /// Run the validation logic against some View Function payload you'd like to use. Returns `Ok(())` - /// if the payload is valid (or if it's not possible to check since the payload has no validation hash). - /// Return an error if the payload was not valid or something went wrong trying to validate it (ie - /// the View Function in question do not exist at all) - pub fn validate(&self, payload: Call) -> Result<(), ViewFunctionError> { - subxt_core::view_functions::validate(payload, &self.client.metadata()).map_err(Into::into) - } - - /// Execute a View Function call. - pub fn call( - &self, - payload: Call, - ) -> impl Future> + use - { - let client = self.client.clone(); - let block_hash = self.block_ref.hash(); - // Ensure that the returned future doesn't have a lifetime tied to api.view_functions(), - // which is a temporary thing we'll be throwing away quickly: - async move { - let metadata = client.metadata(); - - // Validate the View Function payload hash against the compile hash from codegen. - subxt_core::view_functions::validate(&payload, &metadata)?; - - // Assemble the data to call the "execute_view_function" runtime API, which - // then calls the relevant view function. - let call_name = subxt_core::view_functions::CALL_NAME; - let call_args = subxt_core::view_functions::call_args(&payload, &metadata)?; - - // Make the call. - let bytes = client - .backend() - .call(call_name, Some(call_args.as_slice()), block_hash) - .await - .map_err(ViewFunctionError::CannotCallApi)?; - - // Decode the response. 
- let value = - subxt_core::view_functions::decode_value(&mut &*bytes, &payload, &metadata)?; - Ok(value) - } - } -} diff --git a/subxt/src/view_functions/view_functions_client.rs b/subxt/src/view_functions/view_functions_client.rs deleted file mode 100644 index cdd0efe5c4..0000000000 --- a/subxt/src/view_functions/view_functions_client.rs +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2019-2025 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -use super::view_function_types::ViewFunctionsApi; - -use crate::{ - backend::BlockRef, - client::OnlineClientT, - config::{Config, HashFor}, - error::ViewFunctionError, -}; -use derive_where::derive_where; -use std::{future::Future, marker::PhantomData}; - -/// Make View Function calls at some block. -#[derive_where(Clone; Client)] -pub struct ViewFunctionsClient { - client: Client, - _marker: PhantomData, -} - -impl ViewFunctionsClient { - /// Create a new [`ViewFunctionsClient`] - pub fn new(client: Client) -> Self { - Self { - client, - _marker: PhantomData, - } - } -} - -impl ViewFunctionsClient -where - T: Config, - Client: OnlineClientT, -{ - /// Obtain an interface to call View Functions at some block hash. - pub fn at(&self, block_ref: impl Into>>) -> ViewFunctionsApi { - ViewFunctionsApi::new(self.client.clone(), block_ref.into()) - } - - /// Obtain an interface to call View Functions at the latest finalized block. - pub fn at_latest( - &self, - ) -> impl Future, ViewFunctionError>> + Send + 'static - { - // Clone and pass the client in like this so that we can explicitly - // return a Future that's Send + 'static, rather than tied to &self. - let client = self.client.clone(); - async move { - // get the ref for the latest finalized block and use that. 
- let block_ref = client - .backend() - .latest_finalized_block_ref() - .await - .map_err(ViewFunctionError::CannotGetLatestFinalizedBlock)?; - - Ok(ViewFunctionsApi::new(client, block_ref)) - } - } -} diff --git a/utils/accountid32/Cargo.toml b/utils/accountid32/Cargo.toml new file mode 100644 index 0000000000..85c897bb97 --- /dev/null +++ b/utils/accountid32/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "subxt-utils-accountid32" +version.workspace = true +authors.workspace = true +edition.workspace = true +rust-version.workspace = true +publish = true +autotests = false + +license.workspace = true +repository.workspace = true +documentation.workspace = true +homepage.workspace = true +description = "Subxt's AccountId32 representation" + +[dependencies] +codec = { workspace = true } +serde = { workspace = true, features = ["alloc"] } +thiserror = { workspace = true } +base58 = { workspace = true } +scale-encode = { workspace = true, features = ["default"] } +scale-decode = { workspace = true, features = ["default"] } +scale-info = { workspace = true, features = ["derive"] } +blake2 = { workspace = true } + +[dev-dependencies] +sp-core = { workspace = true } +sp-keyring = { workspace = true } + +[lints] +workspace = true \ No newline at end of file diff --git a/core/src/utils/account_id.rs b/utils/accountid32/src/lib.rs similarity index 98% rename from core/src/utils/account_id.rs rename to utils/accountid32/src/lib.rs index 8088a3354f..3bf299a2ce 100644 --- a/core/src/utils/account_id.rs +++ b/utils/accountid32/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// Copyright 2019-2025 Parity Technologies (UK) Ltd. // This file is dual-licensed as Apache-2.0 or GPL-3.0. // see LICENSE for license details. @@ -6,6 +6,10 @@ //! This doesn't contain much functionality itself, but is easy to convert to/from an `sp_core::AccountId32` //! for instance, to gain functionality without forcing a dependency on Substrate crates here. 
+#![no_std] + +extern crate alloc; + use alloc::format; use alloc::string::String; use alloc::vec; @@ -189,4 +193,4 @@ mod test { ); } } -} +} \ No newline at end of file diff --git a/utils/fetch-metadata/src/lib.rs b/utils/fetch-metadata/src/lib.rs index 779aceb725..75ad2f1fec 100644 --- a/utils/fetch-metadata/src/lib.rs +++ b/utils/fetch-metadata/src/lib.rs @@ -4,17 +4,12 @@ //! Subxt utils fetch metadata. -#![cfg_attr(docsrs, feature(doc_cfg))] - -// Internal helper macros -#[macro_use] -mod macros; mod error; -cfg_fetch_from_url! { - mod url; - pub use url::{from_url, from_url_blocking, MetadataVersion, Url}; -} +#[cfg(feature = "url")] +mod url; +#[cfg(feature = "url")] +pub use url::{from_url, from_url_blocking, MetadataVersion, Url}; pub use error::Error; diff --git a/utils/fetch-metadata/src/macros.rs b/utils/fetch-metadata/src/macros.rs deleted file mode 100644 index 4274331afa..0000000000 --- a/utils/fetch-metadata/src/macros.rs +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019-2024 Parity Technologies (UK) Ltd. -// This file is dual-licensed as Apache-2.0 or GPL-3.0. -// see LICENSE for license details. - -macro_rules! cfg_feature { - ($feature:literal, $($item:item)*) => { - $( - #[cfg(feature = $feature)] - #[cfg_attr(docsrs, doc(cfg(feature = $feature)))] - $item - )* - } -} - -macro_rules! cfg_fetch_from_url { - ($($item:item)*) => { - crate::macros::cfg_feature!("url", $($item)*); - }; -} - -#[allow(unused)] -pub(crate) use {cfg_feature, cfg_fetch_from_url};