diff --git a/Cargo.lock b/Cargo.lock index 354b785305..c6a41ff2f2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5823,6 +5823,56 @@ dependencies = [ "thiserror 2.0.12", ] +[[package]] +name = "subxt-new" +version = "0.44.0" +dependencies = [ + "assert_matches", + "async-trait", + "base58", + "bitvec", + "blake2", + "derive-where", + "either", + "frame-decode", + "frame-metadata 23.0.0", + "futures", + "hex", + "http-body", + "hyper", + "impl-serde", + "jsonrpsee", + "keccak-hash", + "parity-scale-codec", + "primitive-types", + "scale-bits", + "scale-decode", + "scale-encode", + "scale-info", + "scale-info-legacy", + "scale-value", + "serde", + "serde_json", + "sp-core", + "sp-crypto-hashing", + "sp-keyring", + "sp-runtime", + "subxt-lightclient", + "subxt-macro", + "subxt-metadata", + "subxt-rpcs", + "subxt-signer", + "thiserror 2.0.12", + "tokio", + "tokio-util", + "tower", + "tracing", + "tracing-subscriber", + "url", + "wasm-bindgen-futures", + "web-time", +] + [[package]] name = "subxt-rpcs" version = "0.44.0" diff --git a/Cargo.toml b/Cargo.toml index efb0b03d3b..93a19ffe27 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,6 @@ [workspace] members = [ + "new", "cli", "codegen", "core", diff --git a/codegen/Cargo.toml b/codegen/Cargo.toml index ffb348d266..fdbc1a95f0 100644 --- a/codegen/Cargo.toml +++ b/codegen/Cargo.toml @@ -36,7 +36,6 @@ frame-metadata = { workspace = true } [package.metadata.docs.rs] features = ["default"] -rustdoc-args = ["--cfg", "docsrs"] [package.metadata.playground] default-features = true diff --git a/core/Cargo.toml b/core/Cargo.toml index f41477cc87..f1021bdb0a 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -74,7 +74,6 @@ hex = { workspace = true } [package.metadata.docs.rs] default-features = true -rustdoc-args = ["--cfg", "docsrs"] [package.metadata.playground] default-features = true diff --git a/lightclient/Cargo.toml b/lightclient/Cargo.toml index 7c94b5b2b8..e3445ee01c 100644 --- a/lightclient/Cargo.toml +++ b/lightclient/Cargo.toml @@ -71,7 +71,6 @@ getrandom = { workspace = true, optional = true } [package.metadata.docs.rs] default-features = true -rustdoc-args = ["--cfg", "docsrs"] [package.metadata.playground] default-features = true diff --git a/new/Cargo.toml b/new/Cargo.toml new file mode 100644 index 0000000000..5db50f5499 --- /dev/null +++ b/new/Cargo.toml @@ -0,0 +1,175 @@ +[package] +name = "subxt-new" +version.workspace = true +authors.workspace = true +edition.workspace = true +rust-version.workspace = true +publish = true + +license.workspace = true +readme = "../README.md" +repository.workspace = true +documentation.workspace = true +homepage.workspace = true +description = "Submit extrinsics (transactions) to a substrate node via RPC" +keywords = ["parity", "substrate", "blockchain"] + +[lints] +workspace = true + +[features] +# For dev and documentation reasons we enable more features than are often desired. +# it's recommended to use `--no-default-features` and then select what you need. +default = ["jsonrpsee", "native"] + +# Features that we expect to be enabled for documentation. +docs = [ + "default", + "unstable-light-client", + "runtime", + "reconnecting-rpc-client", +] + +# Enable this for native (ie non web/wasm builds). +# Exactly 1 of "web" and "native" is expected. +native = [ + "subxt-lightclient?/native", + "subxt-rpcs/native", + "tokio-util", + "tokio?/sync", + "sp-crypto-hashing/std", +] + +# Enable this for web/wasm builds. +# Exactly 1 of "web" and "native" is expected. 
+web = [
+    "subxt-lightclient?/web",
+    "subxt-macro/web",
+    "subxt-rpcs/web",
+    "tokio?/sync",
+]
+
+# Feature flag to enable the default future executor.
+# Technically this enables both executors, which is a hack, but it simplifies the
+# conditional compilation; subxt selects the executor based on the target platform.
+#
+# For instance `wasm-bindgen-futures` panics if the platform isn't wasm32 and
+# similar for tokio that requires a tokio runtime to be initialized.
+runtime = ["tokio/rt", "wasm-bindgen-futures"]
+
+# Enable this to use the reconnecting rpc client
+reconnecting-rpc-client = ["subxt-rpcs/reconnecting-rpc-client"]
+
+# Enable this to use jsonrpsee, which enables the jsonrpsee RPC client, and
+# a couple of util functions which rely on jsonrpsee.
+jsonrpsee = [
+    "dep:jsonrpsee",
+    "subxt-rpcs/jsonrpsee",
+    "runtime"
+]
+
+# Enable this to fetch and utilize the latest unstable metadata from a node.
+# The unstable metadata is subject to breaking changes and subxt might
+# fail to decode it properly. Use this to experiment with the
+# latest features exposed by the metadata.
+unstable-metadata = []
+
+# Activate this to expose the Light Client functionality.
+# Note that this feature is experimental and things may break or not work as expected.
+unstable-light-client = ["subxt-lightclient", "subxt-rpcs/unstable-light-client"]
+
+# Activate this to expose the ability to generate metadata from Wasm runtime files.
+runtime-wasm-path = ["subxt-macro/runtime-wasm-path"]
+
+[dependencies]
+async-trait = { workspace = true }
+base58 = { workspace = true }
+blake2 = { workspace = true }
+codec = { package = "parity-scale-codec", workspace = true, features = ["derive"] }
+derive-where = { workspace = true }
+scale-info = { workspace = true, features = ["default"] }
+scale-info-legacy = { workspace = true }
+scale-value = { workspace = true, features = ["default"] }
+scale-bits = { workspace = true, features = ["default"] }
+scale-decode = { workspace = true, features = ["default"] }
+scale-encode = { workspace = true, features = ["default"] }
+futures = { workspace = true }
+hex = { workspace = true }
+impl-serde = { workspace = true, default-features = false }
+keccak-hash = { workspace = true }
+serde = { workspace = true, features = ["derive"] }
+serde_json = { workspace = true, features = ["default", "raw_value"] }
+sp-crypto-hashing = { workspace = true }
+thiserror = { workspace = true }
+tracing = { workspace = true }
+frame-metadata = { workspace = true }
+frame-decode = { workspace = true, features = ["legacy-types"] }
+either = { workspace = true }
+web-time = { workspace = true }
+
+# Provides some deserialization, types like U256/H256 and hashing impls like twox/blake256:
+primitive-types = { workspace = true, features = ["codec", "scale-info", "serde"] }
+
+# Included if the "jsonrpsee" feature is enabled.
+jsonrpsee = { workspace = true, optional = true, features = ["jsonrpsee-types"] }
+
+# Other subxt crates we depend on.
+subxt-macro = { workspace = true } +subxt-metadata = { workspace = true, features = ["std"] } +subxt-lightclient = { workspace = true, optional = true, default-features = false } +subxt-rpcs = { workspace = true } + +# For parsing urls to disallow insecure schemes +url = { workspace = true } + +# Included if "native" feature is enabled +tokio-util = { workspace = true, features = ["compat"], optional = true } + +# Included if the reconnecting rpc client feature is enabled +# Only the `tokio/sync` is used in the reconnecting rpc client +# and that compiles both for native and web. +tokio = { workspace = true, optional = true } +wasm-bindgen-futures = { workspace = true, optional = true } + +[dev-dependencies] +bitvec = { workspace = true } +codec = { workspace = true, features = ["derive", "bit-vec"] } +scale-info = { workspace = true, features = ["bit-vec"] } +tokio = { workspace = true, features = ["macros", "time", "rt-multi-thread", "sync"] } +sp-core = { workspace = true, features = ["std"] } +sp-keyring = { workspace = true, features = ["std"] } +sp-runtime = { workspace = true, features = ["std"] } +assert_matches = { workspace = true } +subxt-signer = { path = "../signer", features = ["unstable-eth"] } +subxt-rpcs = { workspace = true, features = ["subxt", "mock-rpc-client"] } +# Tracing subscriber is useful for light-client examples to ensure that +# the `bootNodes` and chain spec are configured correctly. If all is fine, then +# the light-client will emit INFO logs with +# `GrandPa warp sync finished` and `Finalized block runtime ready.` +tracing-subscriber = { workspace = true } +# These deps are needed to test the reconnecting rpc client +jsonrpsee = { workspace = true, features = ["server"] } +tower = { workspace = true } +hyper = { workspace = true } +http-body = { workspace = true } + +[[example]] +name = "light_client_basic" +path = "examples/light_client_basic.rs" +required-features = ["unstable-light-client", "jsonrpsee"] + +[[example]] +name = "light_client_local_node" +path = "examples/light_client_local_node.rs" +required-features = ["unstable-light-client", "jsonrpsee", "native"] + +[[example]] +name = "setup_reconnecting_rpc_client" +path = "examples/setup_reconnecting_rpc_client.rs" +required-features = ["reconnecting-rpc-client"] + +[package.metadata.docs.rs] +features = ["docs"] + +[package.metadata.playground] +features = ["default", "unstable-light-client"] diff --git a/new/examples/block_decoding_dynamic.rs b/new/examples/block_decoding_dynamic.rs new file mode 100644 index 0000000000..44ba483221 --- /dev/null +++ b/new/examples/block_decoding_dynamic.rs @@ -0,0 +1,43 @@ +#![allow(missing_docs)] +use subxt::{OnlineClient, PolkadotConfig}; + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a client that subscribes to blocks of the Polkadot network. 
+ let api = OnlineClient::::from_url("wss://rpc.polkadot.io:443").await?; + + // Subscribe to all finalized blocks: + let mut blocks_sub = api.blocks().subscribe_finalized().await?; + while let Some(block) = blocks_sub.next().await { + let block = block?; + let block_number = block.header().number; + let block_hash = block.hash(); + println!("Block #{block_number} ({block_hash})"); + + // Decode each signed extrinsic in the block dynamically + let extrinsics = block.extrinsics().await?; + for ext in extrinsics.iter() { + let Some(transaction_extensions) = ext.transaction_extensions() else { + continue; // we do not look at inherents in this example + }; + + // Decode the fields into our dynamic Value type to display: + let fields = ext.decode_as_fields::()?; + + println!(" {}/{}", ext.pallet_name(), ext.call_name()); + println!(" Transaction Extensions:"); + for signed_ext in transaction_extensions.iter() { + // We only want to take a look at these 3 signed extensions, because the others all just have unit fields. + if ["CheckMortality", "CheckNonce", "ChargeTransactionPayment"] + .contains(&signed_ext.name()) + { + println!(" {}: {}", signed_ext.name(), signed_ext.value()?); + } + } + println!(" Fields:"); + println!(" {fields}\n"); + } + } + + Ok(()) +} diff --git a/new/examples/block_decoding_static.rs b/new/examples/block_decoding_static.rs new file mode 100644 index 0000000000..9af696bab4 --- /dev/null +++ b/new/examples/block_decoding_static.rs @@ -0,0 +1,64 @@ +#![allow(missing_docs)] +use subxt::{ + OnlineClient, PolkadotConfig, + utils::{AccountId32, MultiAddress}, +}; + +use codec::Decode; + +#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] +pub mod polkadot {} + +use polkadot::balances::calls::types::TransferKeepAlive; + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a client that subscribes to blocks of the Polkadot network. + let api = OnlineClient::::from_url("wss://rpc.polkadot.io:443").await?; + + // Subscribe to all finalized blocks: + let mut blocks_sub = api.blocks().subscribe_finalized().await?; + + // For each block, print details about the `TransferKeepAlive` transactions we are interested in. 
+ while let Some(block) = blocks_sub.next().await { + let block = block?; + let block_number = block.header().number; + let block_hash = block.hash(); + println!("Block #{block_number} ({block_hash}):"); + + let extrinsics = block.extrinsics().await?; + for transfer in extrinsics.find::() { + let transfer = transfer?; + + let Some(extensions) = transfer.details.transaction_extensions() else { + panic!("TransferKeepAlive should be signed") + }; + + let addr_bytes = transfer + .details + .address_bytes() + .expect("TransferKeepAlive should be signed"); + let sender = MultiAddress::::decode(&mut &addr_bytes[..]) + .expect("Decoding should work"); + let sender = display_address(&sender); + let receiver = display_address(&transfer.value.dest); + let value = transfer.value.value; + let tip = extensions.tip().expect("Should have tip"); + let nonce = extensions.nonce().expect("Should have nonce"); + + println!( + " Transfer of {value} DOT:\n {sender} (Tip: {tip}, Nonce: {nonce}) ---> {receiver}", + ); + } + } + + Ok(()) +} + +fn display_address(addr: &MultiAddress) -> String { + if let MultiAddress::Id(id32) = addr { + format!("{id32}") + } else { + "MultiAddress::...".into() + } +} diff --git a/new/examples/blocks_subscribing.rs b/new/examples/blocks_subscribing.rs new file mode 100644 index 0000000000..f0f0a37d43 --- /dev/null +++ b/new/examples/blocks_subscribing.rs @@ -0,0 +1,63 @@ +#![allow(missing_docs)] +use subxt::{OnlineClient, PolkadotConfig}; + +#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] +pub mod polkadot {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a client to use: + let api = OnlineClient::::new().await?; + + // Subscribe to all finalized blocks: + let mut blocks_sub = api.blocks().subscribe_finalized().await?; + + // For each block, print a bunch of information about it: + while let Some(block) = blocks_sub.next().await { + let block = block?; + + let block_number = block.header().number; + let block_hash = block.hash(); + + println!("Block #{block_number}:"); + println!(" Hash: {block_hash}"); + println!(" Extrinsics:"); + + // Log each of the extrinsic with it's associated events: + let extrinsics = block.extrinsics().await?; + for ext in extrinsics.iter() { + let idx = ext.index(); + let events = ext.events().await?; + let bytes_hex = format!("0x{}", hex::encode(ext.bytes())); + + // See the API docs for more ways to decode extrinsics: + let decoded_ext = ext.as_root_extrinsic::(); + + println!(" Extrinsic #{idx}:"); + println!(" Bytes: {bytes_hex}"); + println!(" Decoded: {decoded_ext:?}"); + + println!(" Events:"); + for evt in events.iter() { + let evt = evt?; + let pallet_name = evt.pallet_name(); + let event_name = evt.variant_name(); + let event_values = evt.decode_as_fields::()?; + + println!(" {pallet_name}_{event_name}"); + println!(" {event_values}"); + } + + println!(" Transaction Extensions:"); + if let Some(transaction_extensions) = ext.transaction_extensions() { + for transaction_extension in transaction_extensions.iter() { + let name = transaction_extension.name(); + let value = transaction_extension.value()?.to_string(); + println!(" {name}: {value}"); + } + } + } + } + + Ok(()) +} diff --git a/new/examples/constants_dynamic.rs b/new/examples/constants_dynamic.rs new file mode 100644 index 0000000000..2d4ed4c5d7 --- /dev/null +++ b/new/examples/constants_dynamic.rs @@ -0,0 +1,26 @@ +#![allow(missing_docs)] +use subxt::dynamic::Value; +use subxt::{OnlineClient, PolkadotConfig}; + +#[tokio::main] 
+async fn main() -> Result<(), Box> { + // Create a client to use: + let api = OnlineClient::::new().await?; + + // We can query a constant by providing a tuple of the pallet and constant name. The return type + // will be `Value` if we pass this query: + let constant_query = ("System", "BlockLength"); + let _value = api.constants().at(&constant_query)?; + + // Or we can use the library function to query a constant, which allows us to pass a generic type + // that Subxt will attempt to decode the constant into: + let constant_query = subxt::dynamic::constant::("System", "BlockLength"); + let value = api.constants().at(&constant_query)?; + + // Or we can obtain the bytes for the constant, using either form of query. + let bytes = api.constants().bytes_at(&constant_query)?; + + println!("Constant bytes: {:?}", bytes); + println!("Constant value: {}", value); + Ok(()) +} diff --git a/new/examples/constants_static.rs b/new/examples/constants_static.rs new file mode 100644 index 0000000000..2bb1aecbf6 --- /dev/null +++ b/new/examples/constants_static.rs @@ -0,0 +1,24 @@ +#![allow(missing_docs)] +use subxt::{OnlineClient, PolkadotConfig}; + +#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] +pub mod polkadot {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a client to use: + let api = OnlineClient::::new().await?; + + // A query to obtain some constant: + let constant_query = polkadot::constants().system().block_length(); + + // Obtain the value: + let value = api.constants().at(&constant_query)?; + + // Or obtain the bytes: + let bytes = api.constants().bytes_at(&constant_query)?; + + println!("Encoded block length: {bytes:?}"); + println!("Block length: {value:?}"); + Ok(()) +} diff --git a/new/examples/events.rs b/new/examples/events.rs new file mode 100644 index 0000000000..9861c9238e --- /dev/null +++ b/new/examples/events.rs @@ -0,0 +1,48 @@ +#![allow(missing_docs)] +use subxt::{OnlineClient, PolkadotConfig}; + +#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] +pub mod polkadot {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a client to use: + let api = OnlineClient::::new().await?; + + // Get events for the latest block: + let events = api.events().at_latest().await?; + + // We can dynamically decode events: + println!("Dynamic event details:"); + for event in events.iter() { + let event = event?; + + let pallet = event.pallet_name(); + let variant = event.variant_name(); + let field_values = event.decode_as_fields::()?; + + println!("{pallet}::{variant}: {field_values}"); + } + + // Or we can attempt to statically decode them into the root Event type: + println!("Static event details:"); + for event in events.iter() { + let event = event?; + + if let Ok(ev) = event.as_root_event::() { + println!("{ev:?}"); + } else { + println!(""); + } + } + + // Or we can look for specific events which match our statically defined ones: + let transfer_event = events.find_first::()?; + if let Some(ev) = transfer_event { + println!(" - Balance transfer success: value: {:?}", ev.amount); + } else { + println!(" - No balance transfer event found in this block"); + } + + Ok(()) +} diff --git a/new/examples/light_client_basic.rs b/new/examples/light_client_basic.rs new file mode 100644 index 0000000000..397de2a255 --- /dev/null +++ b/new/examples/light_client_basic.rs @@ -0,0 +1,47 @@ +#![allow(missing_docs)] +use futures::StreamExt; +use subxt::{PolkadotConfig, client::OnlineClient, 
lightclient::LightClient}; + +// Generate an interface that we can use from the node's metadata. +#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] +pub mod polkadot {} + +const POLKADOT_SPEC: &str = include_str!("../../artifacts/demo_chain_specs/polkadot.json"); +const ASSET_HUB_SPEC: &str = + include_str!("../../artifacts/demo_chain_specs/polkadot_asset_hub.json"); + +#[tokio::main] +async fn main() -> Result<(), Box> { + // The lightclient logs are informative: + tracing_subscriber::fmt::init(); + + // Instantiate a light client with the Polkadot relay chain, + // and connect it to Asset Hub, too. + let (lightclient, polkadot_rpc) = LightClient::relay_chain(POLKADOT_SPEC)?; + let asset_hub_rpc = lightclient.parachain(ASSET_HUB_SPEC)?; + + // Create Subxt clients from these Smoldot backed RPC clients. + let polkadot_api = OnlineClient::::from_rpc_client(polkadot_rpc).await?; + let asset_hub_api = OnlineClient::::from_rpc_client(asset_hub_rpc).await?; + + // Use them! + let polkadot_sub = polkadot_api + .blocks() + .subscribe_finalized() + .await? + .map(|block| ("Polkadot", block)); + let parachain_sub = asset_hub_api + .blocks() + .subscribe_finalized() + .await? + .map(|block| ("AssetHub", block)); + + let mut stream_combinator = futures::stream::select(polkadot_sub, parachain_sub); + + while let Some((chain, block)) = stream_combinator.next().await { + let block = block?; + println!(" Chain {:?} hash={:?}", chain, block.hash()); + } + + Ok(()) +} diff --git a/new/examples/light_client_local_node.rs b/new/examples/light_client_local_node.rs new file mode 100644 index 0000000000..68012b8551 --- /dev/null +++ b/new/examples/light_client_local_node.rs @@ -0,0 +1,58 @@ +#![allow(missing_docs)] +use subxt::utils::fetch_chainspec_from_rpc_node; +use subxt::{ + PolkadotConfig, + client::OnlineClient, + lightclient::{ChainConfig, LightClient}, +}; +use subxt_signer::sr25519::dev; + +// Generate an interface that we can use from the node's metadata. +#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] +pub mod polkadot {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // The smoldot logs are informative: + tracing_subscriber::fmt::init(); + + // Use a utility function to obtain a chain spec from a locally running node: + let chain_spec = fetch_chainspec_from_rpc_node("ws://127.0.0.1:9944").await?; + + // Configure the bootnodes of this chain spec. In this case, because we start one + // single node, the bootnodes must be overwritten for the light client to connect + // to the local node. + // + // The `12D3KooWEyoppNCUx8Yx66oV9fJnriXwCcXwDDUA2kj6vnc6iDEp` is the P2P address + // from a local polkadot node starting with + // `--node-key 0000000000000000000000000000000000000000000000000000000000000001` + let chain_config = ChainConfig::chain_spec(chain_spec.get()).set_bootnodes([ + "/ip4/127.0.0.1/tcp/30333/p2p/12D3KooWEyoppNCUx8Yx66oV9fJnriXwCcXwDDUA2kj6vnc6iDEp", + ])?; + + // Start the light client up, establishing a connection to the local node. + let (_light_client, chain_rpc) = LightClient::relay_chain(chain_config)?; + let api = OnlineClient::::from_rpc_client(chain_rpc).await?; + + // Build a balance transfer extrinsic. + let dest = dev::bob().public_key().into(); + let balance_transfer_tx = polkadot::tx().balances().transfer_allow_death(dest, 10_000); + + // Submit the balance transfer extrinsic from Alice, and wait for it to be successful + // and in a finalized block. 
We get back the extrinsic events if all is well. + let from = dev::alice(); + let events = api + .tx() + .sign_and_submit_then_watch_default(&balance_transfer_tx, &from) + .await? + .wait_for_finalized_success() + .await?; + + // Find a Transfer event and print it. + let transfer_event = events.find_first::()?; + if let Some(event) = transfer_event { + println!("Balance transfer success: {event:?}"); + } + + Ok(()) +} diff --git a/new/examples/rpc_legacy.rs b/new/examples/rpc_legacy.rs new file mode 100644 index 0000000000..a21afd8f97 --- /dev/null +++ b/new/examples/rpc_legacy.rs @@ -0,0 +1,61 @@ +#![allow(missing_docs)] +use subxt::backend::{legacy::LegacyRpcMethods, rpc::RpcClient}; +use subxt::config::DefaultExtrinsicParamsBuilder as Params; +use subxt::{OnlineClient, PolkadotConfig}; +use subxt_signer::sr25519::dev; + +#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] +pub mod polkadot {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // First, create a raw RPC client: + let rpc_client = RpcClient::from_url("ws://127.0.0.1:9944").await?; + + // Use this to construct our RPC methods: + let rpc = LegacyRpcMethods::::new(rpc_client.clone()); + + // We can use the same client to drive our full Subxt interface too: + let api = OnlineClient::::from_rpc_client(rpc_client.clone()).await?; + + // Now, we can make some RPC calls using some legacy RPC methods. + println!( + "šŸ“› System Name: {:?}\n🩺 Health: {:?}\nšŸ–« Properties: {:?}\nšŸ”— Chain: {:?}\n", + rpc.system_name().await?, + rpc.system_health().await?, + rpc.system_properties().await?, + rpc.system_chain().await? + ); + + // We can also interleave RPC calls and using the full Subxt client, here to submit multiple + // transactions using the legacy `system_account_next_index` RPC call, which returns a nonce + // that is adjusted for any transactions already in the pool: + + let alice = dev::alice(); + let bob = dev::bob(); + + loop { + let current_nonce = rpc + .system_account_next_index(&alice.public_key().into()) + .await?; + + let ext_params = Params::new().mortal(8).nonce(current_nonce).build(); + + let balance_transfer = polkadot::tx() + .balances() + .transfer_allow_death(bob.public_key().into(), 1_000_000); + + let ext_hash = api + .tx() + .create_partial_offline(&balance_transfer, ext_params)? + .sign(&alice) + .submit() + .await?; + + println!("Submitted ext {ext_hash} with nonce {current_nonce}"); + + // Sleep less than block time, but long enough to ensure + // not all transactions end up in the same block. + tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; + } +} diff --git a/new/examples/runtime_apis_dynamic.rs b/new/examples/runtime_apis_dynamic.rs new file mode 100644 index 0000000000..ef9c4ac071 --- /dev/null +++ b/new/examples/runtime_apis_dynamic.rs @@ -0,0 +1,30 @@ +#![allow(missing_docs)] +use subxt::utils::AccountId32; +use subxt::{OnlineClient, config::PolkadotConfig}; +use subxt_signer::sr25519::dev; + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a client to use: + let api = OnlineClient::::new().await?; + + // Create a "dynamic" runtime API payload that calls the + // `AccountNonceApi_account_nonce` function. We could use the + // `scale_value::Value` type as output, and a vec of those as inputs, + // but since we know the input + return types we can pass them directly. + // There is one input argument, so the inputs are a tuple of one element. 
+ let account: AccountId32 = dev::alice().public_key().into(); + let runtime_api_call = + subxt::dynamic::runtime_api_call::<_, u64>("AccountNonceApi", "account_nonce", (account,)); + + // Submit the call to get back a result. + let nonce = api + .runtime_api() + .at_latest() + .await? + .call(runtime_api_call) + .await?; + + println!("Account nonce: {:#?}", nonce); + Ok(()) +} diff --git a/new/examples/runtime_apis_raw.rs b/new/examples/runtime_apis_raw.rs new file mode 100644 index 0000000000..45b5eecc50 --- /dev/null +++ b/new/examples/runtime_apis_raw.rs @@ -0,0 +1,23 @@ +#![allow(missing_docs)] +use subxt::ext::codec::{Compact, Decode}; +use subxt::ext::frame_metadata::RuntimeMetadataPrefixed; +use subxt::{OnlineClient, PolkadotConfig}; + +#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] +pub mod polkadot {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a client to use: + let api = OnlineClient::::new().await?; + + // Use runtime APIs at the latest block: + let runtime_apis = api.runtime_api().at_latest().await?; + + // Ask for metadata and decode it: + let result_bytes = runtime_apis.call_raw("Metadata_metadata", None).await?; + let (_, meta): (Compact, RuntimeMetadataPrefixed) = Decode::decode(&mut &*result_bytes)?; + + println!("{meta:?}"); + Ok(()) +} diff --git a/new/examples/runtime_apis_static.rs b/new/examples/runtime_apis_static.rs new file mode 100644 index 0000000000..95228668e6 --- /dev/null +++ b/new/examples/runtime_apis_static.rs @@ -0,0 +1,28 @@ +#![allow(missing_docs)] +use subxt::{OnlineClient, config::PolkadotConfig}; +use subxt_signer::sr25519::dev; + +#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] +pub mod polkadot {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a client to use: + let api = OnlineClient::::new().await?; + + // Create a runtime API payload that calls into + // `AccountNonceApi_account_nonce` function. + let account = dev::alice().public_key().into(); + let runtime_api_call = polkadot::apis().account_nonce_api().account_nonce(account); + + // Submit the call and get back a result. + let nonce = api + .runtime_api() + .at_latest() + .await? + .call(runtime_api_call) + .await; + + println!("AccountNonceApi_account_nonce for Alice: {nonce:?}"); + Ok(()) +} diff --git a/new/examples/setup_client_custom_rpc.rs b/new/examples/setup_client_custom_rpc.rs new file mode 100644 index 0000000000..47580ba32a --- /dev/null +++ b/new/examples/setup_client_custom_rpc.rs @@ -0,0 +1,86 @@ +#![allow(missing_docs)] +use std::{ + fmt::Write, + pin::Pin, + sync::{Arc, Mutex}, +}; +use subxt::{ + OnlineClient, PolkadotConfig, + backend::rpc::{RawRpcFuture, RawRpcSubscription, RawValue, RpcClient, RpcClientT}, +}; + +// A dummy RPC client that doesn't actually handle requests properly +// at all, but instead just logs what requests to it were made. +struct MyLoggingClient { + log: Arc>, +} + +// We have to implement this fairly low level trait to turn [`MyLoggingClient`] +// into an RPC client that we can make use of in Subxt. Here we just log the requests +// made but don't forward them to any real node, and instead just return nonsense. +impl RpcClientT for MyLoggingClient { + fn request_raw<'a>( + &'a self, + method: &'a str, + params: Option>, + ) -> RawRpcFuture<'a, Box> { + writeln!( + self.log.lock().unwrap(), + "{method}({})", + params.as_ref().map(|p| p.get()).unwrap_or("[]") + ) + .unwrap(); + + // We've logged the request; just return garbage. 
Because a boxed future is returned, + // you're able to run whatever async code you'd need to actually talk to a node. + let res = RawValue::from_string("[]".to_string()).unwrap(); + Box::pin(std::future::ready(Ok(res))) + } + + fn subscribe_raw<'a>( + &'a self, + sub: &'a str, + params: Option>, + unsub: &'a str, + ) -> RawRpcFuture<'a, RawRpcSubscription> { + writeln!( + self.log.lock().unwrap(), + "{sub}({}) (unsub: {unsub})", + params.as_ref().map(|p| p.get()).unwrap_or("[]") + ) + .unwrap(); + + // We've logged the request; just return garbage. Because a boxed future is returned, + // and that will return a boxed Stream impl, you have a bunch of flexibility to build + // and return whatever type of Stream you see fit. + let res = RawValue::from_string("[]".to_string()).unwrap(); + let stream = futures::stream::once(async move { Ok(res) }); + let stream: Pin + Send>> = Box::pin(stream); + // This subscription does not provide an ID. + Box::pin(std::future::ready(Ok(RawRpcSubscription { + stream, + id: None, + }))) + } +} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Instantiate our replacement RPC client. + let log = Arc::default(); + let rpc_client = { + let inner = MyLoggingClient { + log: Arc::clone(&log), + }; + RpcClient::new(inner) + }; + + // Pass this into our OnlineClient to instantiate it. This will lead to some + // RPC calls being made to fetch chain details/metadata, which will immediately + // fail.. + let _ = OnlineClient::::from_rpc_client(rpc_client).await; + + // But, we can see that the calls were made via our custom RPC client: + println!("Log of calls made:\n\n{}", log.lock().unwrap().as_str()); + Ok(()) +} diff --git a/new/examples/setup_client_offline.rs b/new/examples/setup_client_offline.rs new file mode 100644 index 0000000000..ba483f7164 --- /dev/null +++ b/new/examples/setup_client_offline.rs @@ -0,0 +1,35 @@ +#![allow(missing_docs)] +use subxt::ext::codec::Decode; +use subxt::metadata::Metadata; +use subxt::utils::H256; +use subxt::{OfflineClient, config::PolkadotConfig}; + +#[tokio::main] +async fn main() -> Result<(), Box> { + // We need to obtain the following details for an OfflineClient to be instantiated: + + // 1. Genesis hash (RPC call: chain_getBlockHash(0)): + let genesis_hash = { + let h = "91b171bb158e2d3848fa23a9f1c25182fb8e20313b2c1eb49219da7a70ce90c3"; + let bytes = hex::decode(h).unwrap(); + H256::from_slice(&bytes) + }; + + // 2. A runtime version (system_version constant on a Substrate node has these): + let runtime_version = subxt::client::RuntimeVersion { + spec_version: 9370, + transaction_version: 20, + }; + + // 3. 
Metadata (I'll load it from the downloaded metadata, but you can use + // `subxt metadata > file.scale` to download it): + let metadata = { + let bytes = std::fs::read("./artifacts/polkadot_metadata_small.scale").unwrap(); + Metadata::decode(&mut &*bytes).unwrap() + }; + + // Create an offline client using the details obtained above: + let _api = OfflineClient::::new(genesis_hash, runtime_version, metadata); + + Ok(()) +} diff --git a/new/examples/setup_config_assethub.rs b/new/examples/setup_config_assethub.rs new file mode 100644 index 0000000000..b39f39a2dd --- /dev/null +++ b/new/examples/setup_config_assethub.rs @@ -0,0 +1,54 @@ +#![allow(missing_docs)] +use subxt::config::{ + Config, DefaultExtrinsicParams, DefaultExtrinsicParamsBuilder, PolkadotConfig, SubstrateConfig, +}; +use subxt_signer::sr25519::dev; + +#[subxt::subxt( + runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale", + derive_for_type( + path = "staging_xcm::v3::multilocation::MultiLocation", + derive = "Clone, codec::Encode", + recursive + ) +)] +pub mod runtime {} +use runtime::runtime_types::staging_xcm::v3::multilocation::MultiLocation; +use runtime::runtime_types::xcm::v3::junctions::Junctions; + +// We don't need to construct this at runtime, so an empty enum is appropriate. +pub enum AssetHubConfig {} + +impl Config for AssetHubConfig { + type AccountId = ::AccountId; + type Address = ::Address; + type Signature = ::Signature; + type Hasher = ::Hasher; + type Header = ::Header; + type ExtrinsicParams = DefaultExtrinsicParams; + // Here we use the MultiLocation from the metadata as a part of the config: + // The `ChargeAssetTxPayment` signed extension that is part of the ExtrinsicParams above, now uses the type: + type AssetId = MultiLocation; +} + +#[tokio::main] +async fn main() { + // With the config defined, we can create an extrinsic with subxt: + let client = subxt::OnlineClient::::new().await.unwrap(); + let tx_payload = runtime::tx().system().remark(b"Hello".to_vec()); + + // Build extrinsic params using an asset at this location as a tip: + let location: MultiLocation = MultiLocation { + parents: 3, + interior: Junctions::Here, + }; + let tx_config = DefaultExtrinsicParamsBuilder::::new() + .tip_of(1234, location) + .build(); + + // And provide the extrinsic params including the tip when submitting a transaction: + let _ = client + .tx() + .sign_and_submit_then_watch(&tx_payload, &dev::alice(), tx_config) + .await; +} diff --git a/new/examples/setup_config_custom.rs b/new/examples/setup_config_custom.rs new file mode 100644 index 0000000000..a4732f3f89 --- /dev/null +++ b/new/examples/setup_config_custom.rs @@ -0,0 +1,97 @@ +#![allow(missing_docs)] +use codec::Encode; +use subxt::client::ClientState; +use subxt::config::{ + Config, ExtrinsicParams, ExtrinsicParamsEncoder, ExtrinsicParamsError, HashFor, + transaction_extensions::Params, +}; +use subxt_signer::sr25519::dev; + +#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale")] +pub mod runtime {} + +// We don't need to construct this at runtime, +// so an empty enum is appropriate: +pub enum CustomConfig {} + +impl Config for CustomConfig { + type AccountId = subxt::utils::AccountId32; + type Address = subxt::utils::MultiAddress; + type Signature = subxt::utils::MultiSignature; + type Hasher = subxt::config::substrate::BlakeTwo256; + type Header = subxt::config::substrate::SubstrateHeader; + type ExtrinsicParams = CustomExtrinsicParams; + type AssetId = u32; +} + +// This represents some arbitrary (and 
nonsensical) custom parameters that +// will be attached to transaction extra and additional payloads: +pub struct CustomExtrinsicParams { + genesis_hash: HashFor, + tip: u128, + foo: bool, +} + +// We can provide a "pretty" interface to allow users to provide these: +#[derive(Default)] +pub struct CustomExtrinsicParamsBuilder { + tip: u128, + foo: bool, +} + +impl CustomExtrinsicParamsBuilder { + pub fn new() -> Self { + Default::default() + } + pub fn tip(mut self, value: u128) -> Self { + self.tip = value; + self + } + pub fn enable_foo(mut self) -> Self { + self.foo = true; + self + } +} + +impl Params for CustomExtrinsicParamsBuilder {} + +// Describe how to fetch and then encode the params: +impl ExtrinsicParams for CustomExtrinsicParams { + type Params = CustomExtrinsicParamsBuilder; + + // Gather together all of the params we will need to encode: + fn new(client: &ClientState, params: Self::Params) -> Result { + Ok(Self { + genesis_hash: client.genesis_hash, + tip: params.tip, + foo: params.foo, + }) + } +} + +// Encode the relevant params when asked: +impl ExtrinsicParamsEncoder for CustomExtrinsicParams { + fn encode_value_to(&self, v: &mut Vec) { + (self.tip, self.foo).encode_to(v); + } + fn encode_implicit_to(&self, v: &mut Vec) { + self.genesis_hash.encode_to(v) + } +} + +#[tokio::main] +async fn main() { + // With the config defined, it can be handed to Subxt as follows: + let client = subxt::OnlineClient::::new().await.unwrap(); + + let tx_payload = runtime::tx().system().remark(b"Hello".to_vec()); + + // Build your custom "Params": + let tx_config = CustomExtrinsicParamsBuilder::new().tip(1234).enable_foo(); + + // And provide them when submitting a transaction: + let _ = client + .tx() + .sign_and_submit_then_watch(&tx_payload, &dev::alice(), tx_config) + .await; +} diff --git a/new/examples/setup_config_transaction_extension.rs b/new/examples/setup_config_transaction_extension.rs new file mode 100644 index 0000000000..f0fcc58894 --- /dev/null +++ b/new/examples/setup_config_transaction_extension.rs @@ -0,0 +1,106 @@ +#![allow(missing_docs)] +use codec::Encode; +use scale_encode::EncodeAsType; +use scale_info::PortableRegistry; +use subxt::client::ClientState; +use subxt::config::transaction_extensions; +use subxt::config::{ + Config, DefaultExtrinsicParamsBuilder, ExtrinsicParams, ExtrinsicParamsEncoder, + ExtrinsicParamsError, +}; +use subxt_signer::sr25519::dev; + +#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] +pub mod runtime {} + +// We don't need to construct this at runtime, +// so an empty enum is appropriate: +#[derive(EncodeAsType)] +pub enum CustomConfig {} + +impl Config for CustomConfig { + type AccountId = subxt::utils::AccountId32; + type Address = subxt::utils::MultiAddress; + type Signature = subxt::utils::MultiSignature; + type Hasher = subxt::config::substrate::BlakeTwo256; + type Header = subxt::config::substrate::SubstrateHeader; + type ExtrinsicParams = transaction_extensions::AnyOf< + Self, + ( + // Load in the existing signed extensions we're interested in + // (if the extension isn't actually needed it'll just be ignored): + transaction_extensions::VerifySignature, + transaction_extensions::CheckSpecVersion, + transaction_extensions::CheckTxVersion, + transaction_extensions::CheckNonce, + transaction_extensions::CheckGenesis, + transaction_extensions::CheckMortality, + transaction_extensions::ChargeAssetTxPayment, + transaction_extensions::ChargeTransactionPayment, + transaction_extensions::CheckMetadataHash, 
+            // And add a new one of our own:
+            CustomTransactionExtension,
+        ),
+    >;
+    type AssetId = u32;
+}
+
+// Our custom transaction extension doesn't do much:
+pub struct CustomTransactionExtension;
+
+// Give the extension a name; this allows `AnyOf` to look it
+// up in the chain metadata in order to know when and if to use it.
+impl<T: Config> transaction_extensions::TransactionExtension<T> for CustomTransactionExtension {
+    type Decoded = ();
+    fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool {
+        identifier == "CustomTransactionExtension"
+    }
+}
+
+// Gather together any params we need for our transaction extension; here, none.
+impl<T: Config> ExtrinsicParams<T> for CustomTransactionExtension {
+    type Params = ();
+
+    fn new(_client: &ClientState<T>, _params: Self::Params) -> Result<Self, ExtrinsicParamsError> {
+        Ok(CustomTransactionExtension)
+    }
+}
+
+// Encode whatever the extension needs to provide when asked:
+impl ExtrinsicParamsEncoder for CustomTransactionExtension {
+    fn encode_value_to(&self, v: &mut Vec<u8>) {
+        "Hello".encode_to(v);
+    }
+    fn encode_implicit_to(&self, v: &mut Vec<u8>) {
+        true.encode_to(v)
+    }
+}
+
+// When composing a tuple of transaction extensions, the user parameters we provide must
+// convert (via `Into`) into a tuple of the corresponding `Params`. Here, we just
+// "hijack" the default param builder, but add the `Params` (`()`) for our
+// new transaction extension at the end, to make the types line up. In reality you may wish
+// to construct an entirely new interface to provide the relevant `Params`.
+pub fn custom(
+    params: DefaultExtrinsicParamsBuilder<CustomConfig>,
+) -> <<CustomConfig as Config>::ExtrinsicParams as ExtrinsicParams<CustomConfig>>::Params {
+    let (a, b, c, d, e, f, g, h, i) = params.build();
+    (a, b, c, d, e, f, g, h, i, ())
+}
+
+#[tokio::main]
+async fn main() {
+    // With the config defined, it can be handed to Subxt as follows:
+    let client = subxt::OnlineClient::<CustomConfig>::new().await.unwrap();
+
+    let tx_payload = runtime::tx().system().remark(b"Hello".to_vec());
+
+    // Configure the tx params:
+    let tx_config = DefaultExtrinsicParamsBuilder::new().tip(1234);
+
+    // And provide them when submitting a transaction:
+    let _ = client
+        .tx()
+        .sign_and_submit_then_watch(&tx_payload, &dev::alice(), custom(tx_config))
+        .await;
+}
diff --git a/new/examples/setup_reconnecting_rpc_client.rs b/new/examples/setup_reconnecting_rpc_client.rs
new file mode 100644
index 0000000000..a3763947c7
--- /dev/null
+++ b/new/examples/setup_reconnecting_rpc_client.rs
@@ -0,0 +1,77 @@
+//! Example showing how to use the reconnecting RPC client in subxt,
+//! which is hidden behind the `reconnecting-rpc-client` feature flag.
+//!
+//! To get full logs from the RPC client use:
+//! `RUST_LOG="jsonrpsee=trace,subxt-reconnecting-rpc-client=trace"`
+
+#![allow(missing_docs)]
+
+use std::time::Duration;
+
+use futures::StreamExt;
+use subxt::backend::rpc::reconnecting_rpc_client::{ExponentialBackoff, RpcClient};
+use subxt::{OnlineClient, PolkadotConfig};
+
+// Generate an interface that we can use from the node's metadata.
+#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")]
+pub mod polkadot {}
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    tracing_subscriber::fmt::init();
+
+    // Create a new client with a reconnecting RPC client.
+    let rpc = RpcClient::builder()
+        // Reconnect with exponential backoff
+        //
+        // This API is "iterator-like" and we use `take` to limit the number of retries.
+ .retry_policy( + ExponentialBackoff::from_millis(100) + .max_delay(Duration::from_secs(10)) + .take(3), + ) + // There are other configurations as well that can be found at [`reconnecting_rpc_client::ClientBuilder`]. + .build("ws://localhost:9944".to_string()) + .await?; + + // If you want to use the chainhead backend with the reconnecting RPC client, you can do so like this: + // + // ``` + // use subxt::backend::chain_head:ChainHeadBackend; + // use subxt::OnlineClient; + // + // let backend = ChainHeadBackend::builder().build_with_background_task(RpcClient::new(rpc.clone())); + // let api: OnlineClient = OnlineClient::from_backend(Arc::new(backend)).await?; + // ``` + + let api: OnlineClient = OnlineClient::from_rpc_client(rpc.clone()).await?; + + // Run for at most 100 blocks and print a bunch of information about it. + // + // The subscription is automatically re-started when the RPC client has reconnected. + // You can test that by stopping the polkadot node and restarting it. + let mut blocks_sub = api.blocks().subscribe_finalized().await?.take(100); + + while let Some(block) = blocks_sub.next().await { + let block = match block { + Ok(b) => b, + Err(e) => { + // This can only happen on the legacy backend and the unstable backend + // will handle this internally. + if e.is_disconnected_will_reconnect() { + println!("The RPC connection was lost and we may have missed a few blocks"); + continue; + } + + return Err(e.into()); + } + }; + + let block_number = block.number(); + let block_hash = block.hash(); + + println!("Block #{block_number} ({block_hash})"); + } + + Ok(()) +} diff --git a/new/examples/setup_rpc_chainhead_backend.rs b/new/examples/setup_rpc_chainhead_backend.rs new file mode 100644 index 0000000000..37da5fce19 --- /dev/null +++ b/new/examples/setup_rpc_chainhead_backend.rs @@ -0,0 +1,35 @@ +//! Example to utilize the ChainHeadBackend rpc backend to subscribe to finalized blocks. + +#![allow(missing_docs)] + +use futures::StreamExt; +use subxt::backend::chain_head::{ChainHeadBackend, ChainHeadBackendBuilder}; +use subxt::backend::rpc::RpcClient; +use subxt::{OnlineClient, PolkadotConfig}; + +// Generate an interface that we can use from the node's metadata. +#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] +pub mod polkadot {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + tracing_subscriber::fmt::init(); + + let rpc = RpcClient::from_url("ws://localhost:9944".to_string()).await?; + let backend: ChainHeadBackend = + ChainHeadBackendBuilder::default().build_with_background_driver(rpc.clone()); + let api = OnlineClient::from_backend(std::sync::Arc::new(backend)).await?; + + let mut blocks_sub = api.blocks().subscribe_finalized().await?.take(100); + + while let Some(block) = blocks_sub.next().await { + let block = block?; + + let block_number = block.number(); + let block_hash = block.hash(); + + println!("Block #{block_number} ({block_hash})"); + } + + Ok(()) +} diff --git a/new/examples/storage_fetch.rs b/new/examples/storage_fetch.rs new file mode 100644 index 0000000000..1fe491898c --- /dev/null +++ b/new/examples/storage_fetch.rs @@ -0,0 +1,32 @@ +#![allow(missing_docs)] +use subxt::{OnlineClient, PolkadotConfig}; +use subxt_signer::sr25519::dev; + +// Generate an interface that we can use from the node's metadata. 
+#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] +pub mod polkadot {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a new API client, configured to talk to Polkadot nodes. + let api = OnlineClient::::new().await?; + let account = dev::alice().public_key().into(); + + // Build a storage query to access account information. + let storage_query = polkadot::storage().system().account(); + + // Use that query to access a storage entry, fetch a result and decode the value. + // The static address knows that fetching requires a tuple of one value, an + // AccountId32. + let client_at = api.storage().at_latest().await?; + let account_info = client_at + .entry(storage_query)? + .fetch((account,)) + .await? + .decode()?; + + // The static address that we got from the subxt macro knows the expected input + // and return types, so it is decoded into a static type for us. + println!("Alice: {account_info:?}"); + Ok(()) +} diff --git a/new/examples/storage_fetch_dynamic.rs b/new/examples/storage_fetch_dynamic.rs new file mode 100644 index 0000000000..61a81fef98 --- /dev/null +++ b/new/examples/storage_fetch_dynamic.rs @@ -0,0 +1,34 @@ +#![allow(missing_docs)] +use subxt::dynamic::{At, Value}; +use subxt::utils::AccountId32; +use subxt::{OnlineClient, PolkadotConfig}; +use subxt_signer::sr25519::dev; + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a new API client, configured to talk to Polkadot nodes. + let api = OnlineClient::::new().await?; + + // Build a dynamic storage query to access account information. + // here, we assume that there is one value to provide at this entry + // to access a value; an AccountId32. In this example we don't know the + // return type and so we set it to `Value`, which anything can decode into. + let account: AccountId32 = dev::alice().public_key().into(); + let storage_query = subxt::dynamic::storage::<(AccountId32,), Value>("System", "Account"); + + // Use that query to access a storage entry, fetch a result and decode the value. + let client_at = api.storage().at_latest().await?; + let account_info = client_at + .entry(storage_query)? + .fetch((account,)) + .await? + .decode()?; + + // With out `Value` type we can dig in to find what we want using the `At` + // trait and `.at()` method that this provides on the Value. + println!( + "Alice has free balance: {}", + account_info.at("data").at("free").unwrap() + ); + Ok(()) +} diff --git a/new/examples/storage_iterating.rs b/new/examples/storage_iterating.rs new file mode 100644 index 0000000000..3ff74029bd --- /dev/null +++ b/new/examples/storage_iterating.rs @@ -0,0 +1,42 @@ +#![allow(missing_docs)] +use subxt::ext::futures::StreamExt; +use subxt::{OnlineClient, PolkadotConfig}; + +// Generate an interface that we can use from the node's metadata. +#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] +pub mod polkadot {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a new API client, configured to talk to Polkadot nodes. + let api = OnlineClient::::new().await?; + + // Build a storage query to access account information. Same as if we were + // fetching a single value from this entry. + let storage_query = polkadot::storage().system().account(); + + // Use that query to access a storage entry, iterate over it and decode values. + let client_at = api.storage().at_latest().await?; + + // We provide an empty tuple when iterating. 
If the storage entry had been an N map with
+    // multiple keys, then we could provide any prefix of those keys to iterate over. This is
+    // statically type checked, so only a valid number/type of keys in the tuple is accepted.
+    let mut values = client_at.entry(storage_query)?.iter(()).await?;
+
+    while let Some(kv) = values.next().await {
+        let kv = kv?;
+
+        // The key decodes into the type that the static address knows about, in this case a
+        // tuple of one entry, because the only part of the key that we can decode is the
+        // AccountId32 for each user.
+        let (account_id32,) = kv.key()?.decode()?;
+
+        // The value decodes into a statically generated type which holds account information.
+        let value = kv.value().decode()?;
+
+        let value_data = value.data;
+        println!("{account_id32}:\n {value_data:?}");
+    }
+
+    Ok(())
+}
diff --git a/new/examples/storage_iterating_dynamic.rs b/new/examples/storage_iterating_dynamic.rs
new file mode 100644
index 0000000000..443c977eef
--- /dev/null
+++ b/new/examples/storage_iterating_dynamic.rs
@@ -0,0 +1,42 @@
+#![allow(missing_docs)]
+use subxt::ext::futures::StreamExt;
+use subxt::utils::AccountId32;
+use subxt::{
+    OnlineClient, PolkadotConfig,
+    dynamic::{At, Value},
+};
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    // Create a new API client, configured to talk to Polkadot nodes.
+    let api = OnlineClient::<PolkadotConfig>::new().await?;
+
+    // Build a dynamic storage query to access account information.
+    // Here, we assume that there is one value to provide at this entry
+    // to access a value; an AccountId32. In this example we don't know the
+    // return type and so we set it to `Value`, which anything can decode into.
+    let storage_query = subxt::dynamic::storage::<(AccountId32,), Value>("System", "Account");
+
+    // Use that query to access a storage entry, iterate over it and decode values.
+    let client_at = api.storage().at_latest().await?;
+    let mut values = client_at.entry(storage_query)?.iter(()).await?;
+
+    while let Some(kv) = values.next().await {
+        let kv = kv?;
+
+        // The key decodes into the first type we provided in the address. Since there's just
+        // one key, it is a tuple of one entry, an AccountId32. If we didn't know how many
+        // keys there were or their types, we could use a `Vec` as the key type instead.
+        let (account_id32,) = kv.key()?.decode()?;
+
+        // The value decodes into the second type we provided in the address. In this example,
+        // we just decode it into our `Value` type and then look at the "data" field in it
+        // (which implicitly assumes we get a struct shaped thing back with such a field).
+        let value = kv.value().decode()?;
+
+        let value_data = value.at("data").unwrap();
+        println!("{account_id32}:\n {value_data}");
+    }
+
+    Ok(())
+}
diff --git a/new/examples/substrate_compat_signer.rs b/new/examples/substrate_compat_signer.rs
new file mode 100644
index 0000000000..968adffe76
--- /dev/null
+++ b/new/examples/substrate_compat_signer.rs
@@ -0,0 +1,117 @@
+//! This example demonstrates how to add a custom signer implementation to `subxt`
+//! by using the signer implementation from polkadot-sdk.
+//!
+//! Similar functionality was provided by the `substrate-compat` feature in the original `subxt` crate,
+//! which is now removed.
+
+#![allow(missing_docs, unused)]
+
+use sp_core::{Pair as _, sr25519};
+use subxt::config::substrate::MultiAddress;
+use subxt::{Config, OnlineClient, PolkadotConfig};
+
+#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")]
+pub mod polkadot {}
+
+/// A concrete PairSigner implementation which relies on `sr25519::Pair` for signing
+/// and assumes that PolkadotConfig is the runtime configuration.
+mod pair_signer {
+    use super::*;
+    use sp_runtime::{
+        MultiSignature as SpMultiSignature,
+        traits::{IdentifyAccount, Verify},
+    };
+    use subxt::{
+        config::substrate::{AccountId32, MultiSignature},
+        tx::Signer,
+    };
+
+    /// A [`Signer`] implementation for [`sp_core::sr25519::Pair`].
+    #[derive(Clone)]
+    pub struct PairSigner {
+        account_id: <PolkadotConfig as Config>::AccountId,
+        signer: sr25519::Pair,
+    }
+
+    impl PairSigner {
+        /// Creates a new [`Signer`] from an [`sp_core::sr25519::Pair`].
+        pub fn new(signer: sr25519::Pair) -> Self {
+            let account_id =
+                <SpMultiSignature as Verify>::Signer::from(signer.public()).into_account();
+            Self {
+                // Convert `sp_core::AccountId32` to `subxt::config::substrate::AccountId32`.
+                //
+                // This is necessary because we use `subxt::config::substrate::AccountId32` and no
+                // From/Into impls are provided between it and `sp_core::AccountId32`, because `polkadot-sdk` isn't a direct
+                // dependency of subxt.
+                //
+                // This could also be done by providing a wrapper type around `subxt::config::substrate::AccountId32` to implement
+                // such conversions, but that most likely also requires a custom `Config` with a separate `AccountId` type to work
+                // properly without additional hacks.
+                account_id: AccountId32(account_id.into()),
+                signer,
+            }
+        }
+
+        /// Returns the [`sp_core::sr25519::Pair`] implementation used to construct this.
+        pub fn signer(&self) -> &sr25519::Pair {
+            &self.signer
+        }
+
+        /// Return the account ID.
+        pub fn account_id(&self) -> &AccountId32 {
+            &self.account_id
+        }
+    }
+
+    impl Signer<PolkadotConfig> for PairSigner {
+        fn account_id(&self) -> <PolkadotConfig as Config>::AccountId {
+            self.account_id.clone()
+        }
+
+        fn sign(&self, signer_payload: &[u8]) -> <PolkadotConfig as Config>::Signature {
+            let signature = self.signer.sign(signer_payload);
+            MultiSignature::Sr25519(signature.0)
+        }
+    }
+}
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    tracing_subscriber::fmt::init();
+
+    // Create a new API client, configured to talk to Polkadot nodes.
+    let api = OnlineClient::<PolkadotConfig>::new().await?;
+
+    let signer = {
+        let acc = sr25519::Pair::from_string("//Alice", None)?;
+        pair_signer::PairSigner::new(acc)
+    };
+
+    let dest = {
+        let acc = sr25519::Pair::from_string("//Bob", None)?;
+        MultiAddress::Address32(acc.public().0)
+    };
+
+    // Build a balance transfer extrinsic.
+    let balance_transfer_tx = polkadot::tx()
+        .balances()
+        .transfer_allow_death(dest, 100_000);
+
+    // Submit the balance transfer extrinsic from Alice, and wait for it to be successful
+    // and in a finalized block. We get back the extrinsic events if all is well.
+    let events = api
+        .tx()
+        .sign_and_submit_then_watch_default(&balance_transfer_tx, &signer)
+        .await?
+        .wait_for_finalized_success()
+        .await?;
+
+    // Find a Transfer event and print it.
+    let transfer_event = events.find_first::<polkadot::balances::events::Transfer>()?;
+    if let Some(event) = transfer_event {
+        println!("Balance transfer success: {event:?}");
+    }
+
+    Ok(())
+}
diff --git a/new/examples/tx_basic.rs b/new/examples/tx_basic.rs
new file mode 100644
index 0000000000..0c2dc243eb
--- /dev/null
+++ b/new/examples/tx_basic.rs
@@ -0,0 +1,35 @@
+#![allow(missing_docs)]
+use subxt::{OnlineClient, PolkadotConfig};
+use subxt_signer::sr25519::dev;
+
+// Generate an interface that we can use from the node's metadata.
+#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")]
+pub mod polkadot {}
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    // Create a new API client, configured to talk to Polkadot nodes.
+    let api = OnlineClient::<PolkadotConfig>::new().await?;
+
+    // Build a balance transfer extrinsic.
+    let dest = dev::bob().public_key().into();
+    let balance_transfer_tx = polkadot::tx().balances().transfer_allow_death(dest, 10_000);
+
+    // Submit the balance transfer extrinsic from Alice, and wait for it to be successful
+    // and in a finalized block. We get back the extrinsic events if all is well.
+    let from = dev::alice();
+    let events = api
+        .tx()
+        .sign_and_submit_then_watch_default(&balance_transfer_tx, &from)
+        .await?
+        .wait_for_finalized_success()
+        .await?;
+
+    // Find a Transfer event and print it.
+    let transfer_event = events.find_first::<polkadot::balances::events::Transfer>()?;
+    if let Some(event) = transfer_event {
+        println!("Balance transfer success: {event:?}");
+    }
+
+    Ok(())
+}
diff --git a/new/examples/tx_basic_frontier.rs b/new/examples/tx_basic_frontier.rs
new file mode 100644
index 0000000000..23b577a055
--- /dev/null
+++ b/new/examples/tx_basic_frontier.rs
@@ -0,0 +1,56 @@
+//! Example of using subxt to talk to Substrate-based nodes that use Ethereum-style accounts,
+//! which is not the default for subxt, so we need to provide a custom config.
+//!
+//! This example requires a local frontier/moonbeam node to be running.
+
+#![allow(missing_docs)]
+
+use subxt::OnlineClient;
+use subxt_core::utils::AccountId20;
+use subxt_signer::eth::{Signature, dev};
+
+#[subxt::subxt(runtime_metadata_path = "../artifacts/frontier_metadata_small.scale")]
+mod eth_runtime {}
+
+enum EthRuntimeConfig {}
+
+impl subxt::Config for EthRuntimeConfig {
+    type AccountId = AccountId20;
+    type Address = AccountId20;
+    type Signature = Signature;
+    type Hasher = subxt::config::substrate::BlakeTwo256;
+    type Header =
+        subxt::config::substrate::SubstrateHeader<u32, subxt::config::substrate::BlakeTwo256>;
+    type ExtrinsicParams = subxt::config::SubstrateExtrinsicParams<Self>;
+    type AssetId = u32;
+}
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    let api = OnlineClient::<EthRuntimeConfig>::from_insecure_url("ws://127.0.0.1:9944").await?;
+
+    let alith = dev::alith();
+    let baltathar = dev::baltathar();
+    let dest = baltathar.public_key().to_account_id();
+
+    println!("baltathar pub: {}", hex::encode(baltathar.public_key().0));
+    println!("baltathar addr: {}", hex::encode(dest));
+
+    let balance_transfer_tx = eth_runtime::tx()
+        .balances()
+        .transfer_allow_death(dest, 10_001);
+
+    let events = api
+        .tx()
+        .sign_and_submit_then_watch_default(&balance_transfer_tx, &alith)
+        .await?
+ .wait_for_finalized_success() + .await?; + + let transfer_event = events.find_first::()?; + if let Some(event) = transfer_event { + println!("Balance transfer success: {event:?}"); + } + + Ok(()) +} diff --git a/new/examples/tx_boxed.rs b/new/examples/tx_boxed.rs new file mode 100644 index 0000000000..0dd4c4d2e0 --- /dev/null +++ b/new/examples/tx_boxed.rs @@ -0,0 +1,43 @@ +#![allow(missing_docs)] +use subxt::{OnlineClient, PolkadotConfig}; +use subxt_signer::sr25519::dev; + +#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] +pub mod polkadot {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + let api = OnlineClient::::new().await?; + + // Prepare some extrinsics. These are boxed so that they can live alongside each other. + let txs = [dynamic_remark(), balance_transfer(), remark()]; + + for tx in txs { + let from = dev::alice(); + api.tx() + .sign_and_submit_then_watch_default(&tx, &from) + .await? + .wait_for_finalized_success() + .await?; + + println!("Submitted tx"); + } + + Ok(()) +} + +fn balance_transfer() -> Box { + let dest = dev::bob().public_key().into(); + Box::new(polkadot::tx().balances().transfer_allow_death(dest, 10_000)) +} + +fn remark() -> Box { + Box::new(polkadot::tx().system().remark(vec![1, 2, 3, 4, 5])) +} + +fn dynamic_remark() -> Box { + use subxt::dynamic::{Value, tx}; + let tx_payload = tx("System", "remark", vec![Value::from_bytes("Hello")]); + + Box::new(tx_payload) +} diff --git a/new/examples/tx_partial.rs b/new/examples/tx_partial.rs new file mode 100644 index 0000000000..0684091de6 --- /dev/null +++ b/new/examples/tx_partial.rs @@ -0,0 +1,53 @@ +#![allow(missing_docs)] +use subxt::{OnlineClient, PolkadotConfig}; +use subxt_signer::sr25519::dev; + +type BoxedError = Box; + +#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] +pub mod polkadot {} + +#[tokio::main] +async fn main() -> Result<(), BoxedError> { + // Spawned tasks require things held across await points to impl Send, + // so we use one to demonstrate that this is possible with `PartialTransaction` + tokio::spawn(signing_example()).await??; + Ok(()) +} + +async fn signing_example() -> Result<(), BoxedError> { + let api = OnlineClient::::new().await?; + + // Build a balance transfer extrinsic. + let dest = dev::bob().public_key().into(); + let balance_transfer_tx = polkadot::tx().balances().transfer_allow_death(dest, 10_000); + + let alice = dev::alice(); + + // Create partial tx, ready to be signed. + let mut partial_tx = api + .tx() + .create_partial( + &balance_transfer_tx, + &alice.public_key().to_account_id(), + Default::default(), + ) + .await?; + + // Simulate taking some time to get a signature back, in part to + // show that the `PartialTransaction` can be held across await points. + tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; + let signature = alice.sign(&partial_tx.signer_payload()); + + // Sign the transaction. + let tx = partial_tx + .sign_with_account_and_signature(&alice.public_key().to_account_id(), &signature.into()); + + // Submit it. + tx.submit_and_watch() + .await? 
+ .wait_for_finalized_success() + .await?; + + Ok(()) +} diff --git a/new/examples/tx_status_stream.rs b/new/examples/tx_status_stream.rs new file mode 100644 index 0000000000..cdd55c4e82 --- /dev/null +++ b/new/examples/tx_status_stream.rs @@ -0,0 +1,55 @@ +#![allow(missing_docs)] +use subxt::{OnlineClient, PolkadotConfig, tx::TxStatus}; +use subxt_signer::sr25519::dev; + +// Generate an interface that we can use from the node's metadata. +#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] +pub mod polkadot {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a new API client, configured to talk to Polkadot nodes. + let api = OnlineClient::::new().await?; + + // Build a balance transfer extrinsic. + let dest = dev::bob().public_key().into(); + let balance_transfer_tx = polkadot::tx().balances().transfer_allow_death(dest, 10_000); + + // Submit the balance transfer extrinsic from Alice, and then monitor the + // progress of it. + let from = dev::alice(); + let mut balance_transfer_progress = api + .tx() + .sign_and_submit_then_watch_default(&balance_transfer_tx, &from) + .await?; + + while let Some(status) = balance_transfer_progress.next().await { + match status? { + // It's finalized in a block! + TxStatus::InFinalizedBlock(in_block) => { + println!( + "Transaction {:?} is finalized in block {:?}", + in_block.extrinsic_hash(), + in_block.block_hash() + ); + + // grab the events and fail if no ExtrinsicSuccess event seen: + let events = in_block.wait_for_success().await?; + // We can look for events (this uses the static interface; we can also iterate + // over them and dynamically decode them): + let transfer_event = events.find_first::()?; + + if let Some(event) = transfer_event { + println!("Balance transfer success: {event:?}"); + } else { + println!("Failed to find Balances::Transfer Event"); + } + } + // Just log any other status we encounter: + other => { + println!("Status: {other:?}"); + } + } + } + Ok(()) +} diff --git a/new/examples/tx_with_params.rs b/new/examples/tx_with_params.rs new file mode 100644 index 0000000000..00126a7f9f --- /dev/null +++ b/new/examples/tx_with_params.rs @@ -0,0 +1,28 @@ +#![allow(missing_docs)] +use subxt::config::polkadot::PolkadotExtrinsicParamsBuilder as Params; +use subxt::{OnlineClient, PolkadotConfig}; +use subxt_signer::sr25519::dev; + +#[subxt::subxt(runtime_metadata_path = "../artifacts/polkadot_metadata_small.scale")] +pub mod polkadot {} + +#[tokio::main] +async fn main() -> Result<(), Box> { + // Create a new API client, configured to talk to Polkadot nodes. + let api = OnlineClient::::new().await?; + + // Build a balance transfer extrinsic. + let dest = dev::bob().public_key().into(); + let tx = polkadot::tx().balances().transfer_allow_death(dest, 10_000); + + // Configure the transaction parameters; we give a small tip and set the + // transaction to live for 32 blocks from the `latest_block` above. 
+ let tx_params = Params::new().tip(1_000).mortal(32).build(); + + // submit the transaction: + let from = dev::alice(); + let hash = api.tx().sign_and_submit(&tx, &from, tx_params).await?; + println!("Balance transfer extrinsic submitted with hash : {hash}"); + + Ok(()) +} diff --git a/new/src/client.rs b/new/src/client.rs new file mode 100644 index 0000000000..824c2cbc28 --- /dev/null +++ b/new/src/client.rs @@ -0,0 +1,2 @@ +mod online_client; +mod offline_client; \ No newline at end of file diff --git a/new/src/client/offline_client.rs b/new/src/client/offline_client.rs new file mode 100644 index 0000000000..e69de29bb2 diff --git a/new/src/client/online_client.rs b/new/src/client/online_client.rs new file mode 100644 index 0000000000..e69de29bb2 diff --git a/new/src/config.rs b/new/src/config.rs new file mode 100644 index 0000000000..8b29c37f51 --- /dev/null +++ b/new/src/config.rs @@ -0,0 +1,166 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This module provides a [`Config`] type, which is used to define various +//! types that are important in order to speak to a particular chain. +//! [`SubstrateConfig`] provides a default set of these types suitable for the +//! default Substrate node implementation, and [`PolkadotConfig`] for a +//! Polkadot node. + +mod default_extrinsic_params; +mod extrinsic_params; + +pub mod polkadot; +pub mod substrate; +pub mod transaction_extensions; + +use codec::{Decode, Encode}; +use core::fmt::Debug; +use scale_decode::DecodeAsType; +use scale_encode::EncodeAsType; +use serde::{Serialize, de::DeserializeOwned}; +use subxt_metadata::Metadata; +use std::{marker::PhantomData, sync::Arc}; +use scale_info_legacy::TypeRegistrySet; +use subxt_rpcs::RpcConfig; + +pub use default_extrinsic_params::{DefaultExtrinsicParams, DefaultExtrinsicParamsBuilder}; +pub use extrinsic_params::{ExtrinsicParams, ExtrinsicParamsEncoder}; +pub use polkadot::{PolkadotConfig, PolkadotExtrinsicParams, PolkadotExtrinsicParamsBuilder}; +pub use substrate::{SubstrateConfig, SubstrateExtrinsicParams, SubstrateExtrinsicParamsBuilder}; +pub use transaction_extensions::TransactionExtension; + +/// Configuration for a given chain and the runtimes within. This consists of the +/// type information needed to work at the head of the chain (namely submitting +/// transactions), as well as functionality which we might wish to customize for a +/// given chain. +pub trait Config: Clone + Debug + Sized + Send + Sync + 'static { + /// The account ID type; required for constructing extrinsics. + type AccountId: Debug + Clone + Encode + Decode + Serialize + Send; + + /// The address type; required for constructing extrinsics. + type Address: Debug + Encode + From<::AccountId>; + + /// The signature type. + type Signature: Debug + Clone + Encode + Decode + Send; + + /// The block header. + type Header: Header; + + /// This type defines the extrinsic extra and additional parameters. + type ExtrinsicParams: ExtrinsicParams; + + /// This is used to identify an asset in the `ChargeAssetTxPayment` signed extension. + type AssetId: Debug + Clone + Encode + DecodeAsType + EncodeAsType + Send; + + /// The hashing system (algorithm) being used in the runtime (e.g. Blake2). + /// This is created on demand with the relevant metadata for a given block, and + /// can then be used to hash things at that block. + type Hasher: Hasher; + + /// Return the spec version for a given block number, if available. 
+ /// + /// The [`crate::client::OnlineClient`] will look this up on chain if it's not available here, + /// but the [`crate::client::OfflineClient`] will error if this is not available for the required block number. + fn spec_version_for_block_number(&self, block_number: u32) -> Option; + + /// Return the metadata for a given spec version, if available. + /// + /// The [`crate::client::OnlineClient`] will look this up on chain if it's not available here, and then + /// call [`Config::set_metadata_for_spec_version`] to give the configuration the opportunity to cache it. + /// The [`crate::client::OfflineClient`] will error if this is not available for the required spec version. + fn metadata_for_spec_version( + &self, + spec_version: u32, + ) -> Option>; + + /// Set some metadata for a given spec version. the [`crate::client::OnlineClient`] will call this if it has + /// to retrieve metadata from the chain, to give this the opportunity to cache it. The configuration can + /// do nothing if it prefers. + fn set_metadata_for_spec_version( + &self, + spec_version: u32, + metadata: Arc, + ); + + /// Return legacy types (ie types to use with Runtimes that return pre-V14 metadata) for a given spec version. + /// If this returns `None`, [`subxt`] will return an error if type definitions are needed to access some older + /// block. + /// + /// This doesn't need to live for long; it will be used to translate any older metadata returned from the node + /// into our [`Metadata`] type, which will then be used. + fn legacy_types_for_spec_version<'this>( + &'this self, + spec_version: u32, + ) -> Option>; +} + +/// `RpcConfigFor` can be used anywhere which requires an implementation of [`subxt_rpcs::RpcConfig`]. +/// This is only needed at the type level, and so there is no way to construct this. +pub struct RpcConfigFor { + marker: PhantomData +} + +impl RpcConfig for RpcConfigFor { + type Hash = HashFor; + type Header = T::Header; + type AccountId = T::AccountId; +} + +/// Given some [`Config`], this returns the type of hash used. +pub type HashFor = <::Hasher as Hasher>::Hash; + +/// given some [`Config`], this return the other params needed for its `ExtrinsicParams`. +pub type ParamsFor = <::ExtrinsicParams as ExtrinsicParams>::Params; + +/// Block hashes must conform to a bunch of things to be used in Subxt. +pub trait Hash: + Debug + + Copy + + Send + + Sync + + Decode + + AsRef<[u8]> + + Serialize + + DeserializeOwned + + Encode + + PartialEq + + Eq + + core::hash::Hash +{ +} +impl Hash for T where + T: Debug + + Copy + + Send + + Sync + + Decode + + AsRef<[u8]> + + Serialize + + DeserializeOwned + + Encode + + PartialEq + + Eq + + core::hash::Hash +{ +} + +/// This represents the hasher used by a node to hash things like block headers +/// and extrinsics. +pub trait Hasher: Debug + Clone + Send + Sync + 'static { + /// The type of hash produced by this hasher. + type Hash: Hash; + + /// Construct a new hasher. + fn new(metadata: &Metadata) -> Self; + + /// Hash some bytes to the given output type. + fn hash(&self, s: &[u8]) -> Self::Hash; +} + +/// This represents the block header type used by a node. +pub trait Header: Sized + Encode + Decode + Debug + Sync + Send + DeserializeOwned + Clone { + /// Return the block number of this header. 
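A hasher is constructed per block from that block's metadata, which is what lets the `DynamicHasher256` added later in this patch switch algorithms. As a minimal sketch of the `Hasher` contract, assuming it sits alongside these definitions (so `Hasher`, the `Hash` bound and the imports above are in scope) and that a fixed algorithm is acceptable for the target chain, a Keccak-256 hasher could look like this; `FixedKeccak256` is an illustrative name, not part of the patch:

```rust
use primitive_types::H256;
use subxt_metadata::Metadata;

/// A hasher that always uses Keccak-256 and ignores the per-block metadata hint.
#[derive(Debug, Clone, Copy)]
pub struct FixedKeccak256;

impl Hasher for FixedKeccak256 {
    // H256 satisfies the `Hash` bounds above, as it does for the built-in hashers.
    type Hash = H256;

    // A fixed hasher learns nothing from the metadata.
    fn new(_metadata: &Metadata) -> Self {
        FixedKeccak256
    }

    fn hash(&self, s: &[u8]) -> Self::Hash {
        sp_crypto_hashing::keccak_256(s).into()
    }
}
```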
+ fn number(&self) -> u32; +} diff --git a/new/src/config/default_extrinsic_params.rs b/new/src/config/default_extrinsic_params.rs new file mode 100644 index 0000000000..b16421b236 --- /dev/null +++ b/new/src/config/default_extrinsic_params.rs @@ -0,0 +1,168 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use crate::config::transaction_extensions::CheckMortalityParams; + +use super::{Config, HashFor}; +use super::{ExtrinsicParams, transaction_extensions}; + +/// The default [`super::ExtrinsicParams`] implementation understands common signed extensions +/// and how to apply them to a given chain. +pub type DefaultExtrinsicParams = transaction_extensions::AnyOf< + T, + ( + transaction_extensions::VerifySignature, + transaction_extensions::CheckSpecVersion, + transaction_extensions::CheckTxVersion, + transaction_extensions::CheckNonce, + transaction_extensions::CheckGenesis, + transaction_extensions::CheckMortality, + transaction_extensions::ChargeAssetTxPayment, + transaction_extensions::ChargeTransactionPayment, + transaction_extensions::CheckMetadataHash, + ), +>; + +/// A builder that outputs the set of [`super::ExtrinsicParams::Params`] required for +/// [`DefaultExtrinsicParams`]. This may expose methods that aren't applicable to the current +/// chain; such values will simply be ignored if so. +pub struct DefaultExtrinsicParamsBuilder { + /// `None` means the tx will be immortal, else it's mortality is described. + mortality: transaction_extensions::CheckMortalityParams, + /// `None` means the nonce will be automatically set. + nonce: Option, + /// `None` means we'll use the native token. + tip_of_asset_id: Option, + tip: u128, + tip_of: u128, +} + +impl Default for DefaultExtrinsicParamsBuilder { + fn default() -> Self { + Self { + mortality: CheckMortalityParams::::default(), + tip: 0, + tip_of: 0, + tip_of_asset_id: None, + nonce: None, + } + } +} + +impl DefaultExtrinsicParamsBuilder { + /// Configure new extrinsic params. We default to providing no tip + /// and using an immortal transaction unless otherwise configured + pub fn new() -> Self { + Default::default() + } + + /// Make the transaction immortal, meaning it will never expire. This means that it could, in + /// theory, be pending for a long time and only be included many blocks into the future. + pub fn immortal(mut self) -> Self { + self.mortality = transaction_extensions::CheckMortalityParams::::immortal(); + self + } + + /// Make the transaction mortal, given a number of blocks it will be mortal for from + /// the current block at the time of submission. + /// + /// # Warning + /// + /// This will ultimately return an error if used for creating extrinsic offline, because we need + /// additional information in order to set the mortality properly. + /// + /// When creating offline transactions, you must use [`Self::mortal_from_unchecked`] instead to set + /// the mortality. This provides all of the necessary information which we must otherwise be online + /// in order to obtain. + pub fn mortal(mut self, for_n_blocks: u64) -> Self { + self.mortality = transaction_extensions::CheckMortalityParams::::mortal(for_n_blocks); + self + } + + /// Configure a transaction that will be mortal for the number of blocks given, and from the + /// block details provided. Prefer to use [`Self::mortal()`] where possible, which prevents + /// the block number and hash from being misaligned. 
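To make the online/offline distinction above concrete, here is a small sketch using the `PolkadotExtrinsicParamsBuilder` alias from this patch, imported as in the `tx_with_params` example; the block number and hash for the offline case are assumed to have been obtained out of band:

```rust
use subxt::config::polkadot::{H256, PolkadotExtrinsicParamsBuilder as Params};

fn build_params(current_block_number: u64, current_block_hash: H256) {
    // Online: a 32-block mortality window; the client injects the current
    // block number and hash when the transaction is created.
    let _online = Params::new().tip(1_000).mortal(32).build();

    // Offline: the same window, but the block details must be supplied up
    // front via `mortal_from_unchecked`.
    let _offline = Params::new()
        .mortal_from_unchecked(32, current_block_number, current_block_hash)
        .build();
}
```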
+ pub fn mortal_from_unchecked( + mut self, + for_n_blocks: u64, + from_block_n: u64, + from_block_hash: HashFor, + ) -> Self { + self.mortality = transaction_extensions::CheckMortalityParams::mortal_from_unchecked( + for_n_blocks, + from_block_n, + from_block_hash, + ); + self + } + + /// Provide a specific nonce for the submitter of the extrinsic + pub fn nonce(mut self, nonce: u64) -> Self { + self.nonce = Some(nonce); + self + } + + /// Provide a tip to the block author in the chain's native token. + pub fn tip(mut self, tip: u128) -> Self { + self.tip = tip; + self.tip_of = tip; + self.tip_of_asset_id = None; + self + } + + /// Provide a tip to the block author using the token denominated by the `asset_id` provided. This + /// is not applicable on chains which don't use the `ChargeAssetTxPayment` signed extension; in this + /// case, no tip will be given. + pub fn tip_of(mut self, tip: u128, asset_id: T::AssetId) -> Self { + self.tip = 0; + self.tip_of = tip; + self.tip_of_asset_id = Some(asset_id); + self + } + + /// Build the extrinsic parameters. + pub fn build(self) -> as ExtrinsicParams>::Params { + let check_mortality_params = self.mortality; + + let charge_asset_tx_params = if let Some(asset_id) = self.tip_of_asset_id { + transaction_extensions::ChargeAssetTxPaymentParams::tip_of(self.tip, asset_id) + } else { + transaction_extensions::ChargeAssetTxPaymentParams::tip(self.tip) + }; + + let charge_transaction_params = + transaction_extensions::ChargeTransactionPaymentParams::tip(self.tip); + + let check_nonce_params = if let Some(nonce) = self.nonce { + transaction_extensions::CheckNonceParams::with_nonce(nonce) + } else { + transaction_extensions::CheckNonceParams::from_chain() + }; + + ( + (), + (), + (), + check_nonce_params, + (), + check_mortality_params, + charge_asset_tx_params, + charge_transaction_params, + (), + ) + } +} + +#[cfg(test)] +mod test { + use super::*; + + fn assert_default(_t: T) {} + + #[test] + fn params_are_default() { + let params = DefaultExtrinsicParamsBuilder::::new().build(); + assert_default(params) + } +} diff --git a/new/src/config/extrinsic_params.rs b/new/src/config/extrinsic_params.rs new file mode 100644 index 0000000000..0789dca22e --- /dev/null +++ b/new/src/config/extrinsic_params.rs @@ -0,0 +1,159 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This module contains a trait which controls the parameters that must +//! be provided in order to successfully construct an extrinsic. +//! [`crate::config::DefaultExtrinsicParams`] provides a general-purpose +//! implementation of this that will work in many cases. + +use crate::{ + config::{Config, HashFor}, + error::ExtrinsicParamsError, +}; +use subxt_metadata::Metadata; +use std::sync::Arc; +use core::any::Any; + +/// This provides access to some relevant client state in transaction extensions, +/// and is just a combination of some of the available properties. +#[derive(Clone, Debug)] +pub struct ClientState { + /// Genesis hash. + pub genesis_hash: HashFor, + /// Runtime version. + pub runtime_version: RuntimeVersion, + /// Metadata. + pub metadata: Arc, +} + +/// Runtime version information needed to submit transactions. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct RuntimeVersion { + /// Version of the runtime specification. 
A full-node will not attempt to use its native + /// runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`, + /// `spec_version` and `authoring_version` are the same between Wasm and native. + pub spec_version: u32, + /// All existing dispatches are fully compatible when this number doesn't change. If this + /// number changes, then `spec_version` must change, also. + /// + /// This number must change when an existing dispatchable (module ID, dispatch ID) is changed, + /// either through an alteration in its user-level semantics, a parameter + /// added/removed/changed, a dispatchable being removed, a module being removed, or a + /// dispatchable/module changing its index. + /// + /// It need *not* change when a new module is added or when a dispatchable is added. + pub transaction_version: u32, +} + +/// This trait allows you to configure the "signed extra" and +/// "additional" parameters that are a part of the transaction payload +/// or the signer payload respectively. +pub trait ExtrinsicParams: ExtrinsicParamsEncoder + Sized + Send + 'static { + /// These parameters can be provided to the constructor along with + /// some default parameters that `subxt` understands, in order to + /// help construct your [`ExtrinsicParams`] object. + type Params: Params; + + /// Construct a new instance of our [`ExtrinsicParams`]. + fn new(client: &ClientState, params: Self::Params) -> Result; +} + +/// This trait is expected to be implemented for any [`ExtrinsicParams`], and +/// defines how to encode the "additional" and "extra" params. Both functions +/// are optional and will encode nothing by default. +pub trait ExtrinsicParamsEncoder: 'static { + /// This is expected to SCALE encode the transaction extension data to some + /// buffer that has been provided. This data is attached to the transaction + /// and also (by default) attached to the signer payload which is signed to + /// provide a signature for the transaction. + /// + /// If [`ExtrinsicParamsEncoder::encode_signer_payload_value_to`] is implemented, + /// then that will be used instead when generating a signer payload. Useful for + /// eg the `VerifySignature` extension, which is send with the transaction but + /// is not a part of the signer payload. + fn encode_value_to(&self, _v: &mut Vec) {} + + /// See [`ExtrinsicParamsEncoder::encode_value_to`]. This defaults to calling that + /// method, but if implemented will dictate what is encoded to the signer payload. + fn encode_signer_payload_value_to(&self, v: &mut Vec) { + self.encode_value_to(v); + } + + /// This is expected to SCALE encode the "implicit" (formally "additional") + /// parameters to some buffer that has been provided. These parameters are + /// _not_ sent along with the transaction, but are taken into account when + /// signing it, meaning the client and node must agree on their values. + fn encode_implicit_to(&self, _v: &mut Vec) {} + + /// Set the signature. This happens after we have constructed the extrinsic params, + /// and so is defined here rather than on the params, below. We need to use `&dyn Any` + /// to keep this trait object safe, but can downcast in the impls. + /// + /// # Panics + /// + /// Implementations of this will likely try to downcast the provided `account_id` + /// and `signature` into `T::AccountId` and `T::Signature` (where `T: Config`), and are + /// free to panic if this downcasting does not succeed. 
+ /// + /// In typical usage, this is not a problem, since this method is only called internally + /// and provided values which line up with the relevant `Config`. In theory though, this + /// method can be called manually with any types, hence this warning. + fn inject_signature(&mut self, _account_id: &dyn Any, _signature: &dyn Any) {} +} + +/// The parameters (ie [`ExtrinsicParams::Params`]) can also have data injected into them, +/// allowing Subxt to retrieve data from the chain and amend the parameters with it when +/// online. +pub trait Params { + /// Set the account nonce. + fn inject_account_nonce(&mut self, _nonce: u64) {} + /// Set the current block. + fn inject_block(&mut self, _number: u64, _hash: HashFor) {} +} + +impl Params for () {} + +macro_rules! impl_tuples { + ($($ident:ident $index:tt),+) => { + impl ),+> Params for ($($ident,)+){ + fn inject_account_nonce(&mut self, nonce: u64) { + $(self.$index.inject_account_nonce(nonce);)+ + } + + fn inject_block(&mut self, number: u64, hash: HashFor) { + $(self.$index.inject_block(number, hash);)+ + } + } + } +} + +#[rustfmt::skip] +const _: () = { + impl_tuples!(A 0); + impl_tuples!(A 0, B 1); + impl_tuples!(A 0, B 1, C 2); + impl_tuples!(A 0, B 1, C 2, D 3); + impl_tuples!(A 0, B 1, C 2, D 3, E 4); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, T 19); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, T 19, U 20); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, T 19, U 20, V 21); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, T 19, U 20, V 21, W 22); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, T 19, U 20, V 21, W 22, X 23); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, T 19, U 20, V 21, W 22, X 23, Y 24); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, T 19, U 20, V 21, W 22, X 23, Y 24, Z 25); +}; diff --git a/new/src/config/polkadot.rs b/new/src/config/polkadot.rs new file mode 100644 index 
0000000000..88ba9507fc --- /dev/null +++ b/new/src/config/polkadot.rs @@ -0,0 +1,104 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Polkadot specific configuration + +use super::{Config, DefaultExtrinsicParams, DefaultExtrinsicParamsBuilder}; + +use crate::config::substrate::{SubstrateConfig, SubstrateConfigBuilder}; +use std::sync::Arc; +use scale_info_legacy::TypeRegistrySet; +use subxt_metadata::Metadata; + +pub use crate::config::substrate::{ SpecVersionForRange, SubstrateHeader }; +pub use crate::utils::{AccountId32, MultiAddress, MultiSignature}; +pub use primitive_types::{H256, U256}; + +/// Construct a [`PolkadotConfig`] using this. +pub struct PolkadotConfigBuilder(SubstrateConfigBuilder); + +impl PolkadotConfigBuilder { + /// Create a new [`PolkadotConfigBuilder`]. + pub fn new() -> Self { + let inner = SubstrateConfigBuilder::new() + .set_legacy_types(frame_decode::legacy_types::polkadot::relay_chain()); + + PolkadotConfigBuilder(inner) + } + + /// Set the metadata to be used for decoding blocks at the given spec versions. + pub fn set_metadata_for_spec_versions( + mut self, + ranges: impl Iterator)>, + ) -> Self { + self = Self(self.0.set_metadata_for_spec_versions(ranges)); + self + } + + /// Given an iterator of block ranges to spec version of the form `(start, end, spec_version)`, add them + /// to this configuration. + pub fn set_spec_version_for_block_ranges( + mut self, + ranges: impl Iterator, + ) -> Self { + self = Self(self.0.set_spec_version_for_block_ranges(ranges)); + self + } + + /// Construct the [`PolkadotConfig`] from this builder. + pub fn build(self) -> PolkadotConfig { + PolkadotConfig(self.0.build()) + } +} + +/// Configuration that's suitable for the Polkadot Relay Chain. +#[derive(Debug, Clone)] +pub struct PolkadotConfig(SubstrateConfig); + +impl Config for PolkadotConfig { + type AccountId = ::AccountId; + type Signature = ::Signature; + type Hasher = ::Hasher; + type Header = ::Header; + type AssetId = ::AssetId; + + // Address on Polkadot has no account index, whereas it's u32 on + // the default substrate dev node. + type Address = MultiAddress; + + // These are the same as the default substrate node, but redefined + // because we need to pass the PolkadotConfig trait as a param. + type ExtrinsicParams = PolkadotExtrinsicParams; + + fn legacy_types_for_spec_version(&'_ self, spec_version: u32) -> Option> { + self.0.legacy_types_for_spec_version(spec_version) + } + + fn spec_version_for_block_number(&self, block_number: u32) -> Option { + self.0.spec_version_for_block_number(block_number) + } + + fn metadata_for_spec_version( + &self, + spec_version: u32, + ) -> Option> { + self.0.metadata_for_spec_version(spec_version) + } + + fn set_metadata_for_spec_version( + &self, + spec_version: u32, + metadata: Arc, + ) { + self.0.set_metadata_for_spec_version(spec_version, metadata) + } +} + +/// A struct representing the signed extra and additional parameters required +/// to construct a transaction for a polkadot node. +pub type PolkadotExtrinsicParams = DefaultExtrinsicParams; + +/// A builder which leads to [`PolkadotExtrinsicParams`] being constructed. +/// This is what you provide to methods like `sign_and_submit()`. 
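As a sketch of how the `PolkadotConfigBuilder` at the top of this file is intended to be used (module paths assumed from the re-exports in this patch; the spec versions and block ranges are placeholders), a config can be pre-loaded with the spec versions covering historical block ranges so that decoding old blocks needs no extra chain lookups. Metadata for each spec version can be registered the same way via `set_metadata_for_spec_versions`:

```rust
use subxt::config::polkadot::{PolkadotConfig, PolkadotConfigBuilder, SpecVersionForRange};

fn historic_polkadot_config() -> PolkadotConfig {
    PolkadotConfigBuilder::new()
        .set_spec_version_for_block_ranges(
            [
                // Placeholder ranges: spec version 25 up to (but excluding)
                // block 1_000_000, then spec version 26 up to block 2_000_000.
                SpecVersionForRange { block_range: 0..1_000_000, spec_version: 25 },
                SpecVersionForRange { block_range: 1_000_000..2_000_000, spec_version: 26 },
            ]
            .into_iter(),
        )
        .build()
}
```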
+pub type PolkadotExtrinsicParamsBuilder = DefaultExtrinsicParamsBuilder; diff --git a/new/src/config/substrate.rs b/new/src/config/substrate.rs new file mode 100644 index 0000000000..b383987fcb --- /dev/null +++ b/new/src/config/substrate.rs @@ -0,0 +1,529 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Substrate specific configuration + +use super::{Config, DefaultExtrinsicParams, DefaultExtrinsicParamsBuilder, Hasher, Header}; +use crate::config::Hash; +pub use crate::utils::{AccountId32, MultiAddress, MultiSignature}; +use codec::{Decode, Encode}; +pub use primitive_types::{H256, U256}; +use serde::{Deserialize, Serialize}; +use subxt_metadata::Metadata; +use crate::utils::RangeMap; +use scale_info_legacy::{ChainTypeRegistry, TypeRegistrySet}; +use std::collections::HashMap; +use std::sync::Arc; +use std::sync::Mutex; + +/// Construct a [`SubstrateConfig`] using this. +pub struct SubstrateConfigBuilder { + legacy_types: Option, + spec_version_for_block_number: RangeMap, + metadata_for_spec_version: Mutex>>, +} + +impl Default for SubstrateConfigBuilder { + fn default() -> Self { + Self::new() + } +} + +impl SubstrateConfigBuilder { + /// Create a new builder to construct a [`SubstrateConfig`] from. + pub fn new() -> Self { + SubstrateConfigBuilder { + legacy_types: None, + spec_version_for_block_number: RangeMap::empty(), + metadata_for_spec_version: Mutex::new(HashMap::new()), + } + } + + /// Set the legacy types to use for this configuration. This enables support for + /// blocks produced by Runtimes that emit metadata older than V14. + pub fn set_legacy_types(mut self, legacy_types: ChainTypeRegistry) -> Self { + self.legacy_types = Some(legacy_types); + self + } + + /// Set the metadata to be used for decoding blocks at the given spec versions. + pub fn set_metadata_for_spec_versions( + self, + ranges: impl Iterator)>, + ) -> Self { + let mut map = self.metadata_for_spec_version.lock().unwrap(); + for (spec_version, metadata) in ranges { + map.insert(spec_version, metadata); + } + drop(map); + self + } + + /// Given an iterator of block ranges to spec version of the form `(start, end, spec_version)`, add them + /// to this configuration. + pub fn set_spec_version_for_block_ranges( + mut self, + ranges: impl Iterator, + ) -> Self { + let mut m = RangeMap::builder(); + for version_for_range in ranges { + let start = version_for_range.block_range.start; + let end = version_for_range.block_range.end; + let spec_version = version_for_range.spec_version; + m = m.add_range(start, end, spec_version); + } + self.spec_version_for_block_number = m.build(); + self + } + + /// Construct the [`SubstrateConfig`] from this builder. + pub fn build(self) -> SubstrateConfig { + SubstrateConfig { + inner: Arc::new(SubstrateConfigInner { + legacy_types: self.legacy_types, + spec_version_for_block_number: self.spec_version_for_block_number, + metadata_for_spec_version: self.metadata_for_spec_version, + }) + } + } +} + +/// Define a spec version for a range of blocks. The new spec version is expected +/// to begin at the first block in the range and end just prior to the last block +/// in the range. +pub struct SpecVersionForRange { + /// The block range that this spec version applies to. Inclusive of the start + /// and exclusive of the enc. + pub block_range: std::ops::Range, + /// The spec version at this block range. 
+ pub spec_version: u32, +} + +/// Configuration that's suitable for standard Substrate chains (ie those +/// that have not customized the block hash type). +#[derive(Debug, Clone)] +pub struct SubstrateConfig { + inner: Arc +} + +#[derive(Debug)] +struct SubstrateConfigInner { + legacy_types: Option, + spec_version_for_block_number: RangeMap, + metadata_for_spec_version: Mutex>>, +} + +impl SubstrateConfig { + /// Build a new [`SubstrateConfig`]. + pub fn builder() -> SubstrateConfigBuilder { + SubstrateConfigBuilder::new() + } +} + +impl Config for SubstrateConfig { + type AccountId = AccountId32; + type Address = MultiAddress; + type Signature = MultiSignature; + type Hasher = DynamicHasher256; + type Header = SubstrateHeader<::Hash>; + type ExtrinsicParams = SubstrateExtrinsicParams; + type AssetId = u32; + + fn legacy_types_for_spec_version(&'_ self, spec_version: u32) -> Option> { + self.inner + .legacy_types + .as_ref() + .map(|types| types.for_spec_version(spec_version as u64)) + } + + fn spec_version_for_block_number(&self, block_number: u32) -> Option { + self.inner.spec_version_for_block_number + .get(block_number) + .copied() + } + + fn metadata_for_spec_version( + &self, + spec_version: u32, + ) -> Option> { + self.inner + .metadata_for_spec_version + .lock() + .unwrap() + .get(&spec_version) + .cloned() + } + + fn set_metadata_for_spec_version( + &self, + spec_version: u32, + metadata: Arc, + ) { + self.inner + .metadata_for_spec_version + .lock() + .unwrap() + .insert(spec_version, metadata); + } +} + +/// A struct representing the signed extra and additional parameters required +/// to construct a transaction for the default substrate node. +pub type SubstrateExtrinsicParams = DefaultExtrinsicParams; + +/// A builder which leads to [`SubstrateExtrinsicParams`] being constructed. +/// This is what you provide to methods like `sign_and_submit()`. +pub type SubstrateExtrinsicParamsBuilder = DefaultExtrinsicParamsBuilder; + +/// A hasher (ie implements [`Hasher`]) which hashes values using the blaks2_256 algorithm. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct BlakeTwo256; + +impl Hasher for BlakeTwo256 { + type Hash = H256; + + fn new(_metadata: &Metadata) -> Self { + Self + } + + fn hash(&self, s: &[u8]) -> Self::Hash { + sp_crypto_hashing::blake2_256(s).into() + } +} + +/// A hasher (ie implements [`Hasher`]) which inspects the runtime metadata to decide how to +/// hash types, falling back to blake2_256 if the hasher information is not available. +/// +/// Currently this hasher supports only `BlakeTwo256` and `Keccak256` hashing methods. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct DynamicHasher256(HashType); + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum HashType { + // Most chains use this: + BlakeTwo256, + // Chains like Hyperbridge use this (tends to be eth compatible chains) + Keccak256, + // If we don't have V16 metadata, we'll emit this and default to BlakeTwo256. + Unknown, +} + +impl Hasher for DynamicHasher256 { + type Hash = H256; + + fn new(metadata: &Metadata) -> Self { + // Determine the Hash associated type used for the current chain, if possible. 
+ let Some(system_pallet) = metadata.pallet_by_name("System") else { + return Self(HashType::Unknown); + }; + let Some(hash_ty_id) = system_pallet.associated_type_id("Hashing") else { + return Self(HashType::Unknown); + }; + + let ty = metadata + .types() + .resolve(hash_ty_id) + .expect("Type information for 'Hashing' associated type should be in metadata"); + + let hash_type = match ty.path.ident().as_deref().unwrap_or("") { + "BlakeTwo256" => HashType::BlakeTwo256, + "Keccak256" => HashType::Keccak256, + _ => HashType::Unknown, + }; + + Self(hash_type) + } + + fn hash(&self, s: &[u8]) -> Self::Hash { + match self.0 { + HashType::BlakeTwo256 | HashType::Unknown => sp_crypto_hashing::blake2_256(s).into(), + HashType::Keccak256 => sp_crypto_hashing::keccak_256(s).into(), + } + } +} + +/// A generic Substrate header type, adapted from `sp_runtime::generic::Header`. +/// The block number and hasher can be configured to adapt this for other nodes. +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct SubstrateHeader { + /// The parent hash. + pub parent_hash: Hash, + /// The block number. + #[serde( + serialize_with = "serialize_number", + deserialize_with = "deserialize_number" + )] + #[codec(compact)] + pub number: u32, + /// The state trie merkle root + pub state_root: Hash, + /// The merkle root of the extrinsics. + pub extrinsics_root: Hash, + /// A chain-specific digest of data useful for light clients or referencing auxiliary data. + pub digest: Digest, +} + +impl Header for SubstrateHeader +where + H: Hash, + SubstrateHeader: Encode + Decode, +{ + fn number(&self) -> u32 { + self.number.into() + } +} + +/// Generic header digest. From `sp_runtime::generic::digest`. +#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Default)] +pub struct Digest { + /// A list of digest items. + pub logs: Vec, +} + +/// Digest item that is able to encode/decode 'system' digest items and +/// provide opaque access to other items. From `sp_runtime::generic::digest`. +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum DigestItem { + /// A pre-runtime digest. + /// + /// These are messages from the consensus engine to the runtime, although + /// the consensus engine can (and should) read them itself to avoid + /// code and state duplication. It is erroneous for a runtime to produce + /// these, but this is not (yet) checked. + /// + /// NOTE: the runtime is not allowed to panic or fail in an `on_initialize` + /// call if an expected `PreRuntime` digest is not present. It is the + /// responsibility of a external block verifier to check this. Runtime API calls + /// will initialize the block without pre-runtime digests, so initialization + /// cannot fail when they are missing. + PreRuntime(ConsensusEngineId, Vec), + + /// A message from the runtime to the consensus engine. This should *never* + /// be generated by the native code of any consensus engine, but this is not + /// checked (yet). + Consensus(ConsensusEngineId, Vec), + + /// Put a Seal on it. This is only used by native code, and is never seen + /// by runtimes. + Seal(ConsensusEngineId, Vec), + + /// Some other thing. Unsupported and experimental. + Other(Vec), + + /// An indication for the light clients that the runtime execution + /// environment is updated. + /// + /// Currently this is triggered when: + /// 1. Runtime code blob is changed or + /// 2. `heap_pages` value is changed. 
+ RuntimeEnvironmentUpdated, +} + +// From sp_runtime::generic, DigestItem enum indexes are encoded using this: +#[repr(u32)] +#[derive(Encode, Decode)] +enum DigestItemType { + Other = 0u32, + Consensus = 4u32, + Seal = 5u32, + PreRuntime = 6u32, + RuntimeEnvironmentUpdated = 8u32, +} +impl Encode for DigestItem { + fn encode(&self) -> Vec { + let mut v = Vec::new(); + + match self { + Self::Consensus(val, data) => { + DigestItemType::Consensus.encode_to(&mut v); + (val, data).encode_to(&mut v); + } + Self::Seal(val, sig) => { + DigestItemType::Seal.encode_to(&mut v); + (val, sig).encode_to(&mut v); + } + Self::PreRuntime(val, data) => { + DigestItemType::PreRuntime.encode_to(&mut v); + (val, data).encode_to(&mut v); + } + Self::Other(val) => { + DigestItemType::Other.encode_to(&mut v); + val.encode_to(&mut v); + } + Self::RuntimeEnvironmentUpdated => { + DigestItemType::RuntimeEnvironmentUpdated.encode_to(&mut v); + } + } + + v + } +} +impl Decode for DigestItem { + fn decode(input: &mut I) -> Result { + let item_type: DigestItemType = Decode::decode(input)?; + match item_type { + DigestItemType::PreRuntime => { + let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; + Ok(Self::PreRuntime(vals.0, vals.1)) + } + DigestItemType::Consensus => { + let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; + Ok(Self::Consensus(vals.0, vals.1)) + } + DigestItemType::Seal => { + let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; + Ok(Self::Seal(vals.0, vals.1)) + } + DigestItemType::Other => Ok(Self::Other(Decode::decode(input)?)), + DigestItemType::RuntimeEnvironmentUpdated => Ok(Self::RuntimeEnvironmentUpdated), + } + } +} + +/// Consensus engine unique ID. From `sp_runtime::ConsensusEngineId`. +pub type ConsensusEngineId = [u8; 4]; + +impl serde::Serialize for DigestItem { + fn serialize(&self, seq: S) -> Result + where + S: serde::Serializer, + { + self.using_encoded(|bytes| impl_serde::serialize::serialize(bytes, seq)) + } +} + +impl<'a> serde::Deserialize<'a> for DigestItem { + fn deserialize(de: D) -> Result + where + D: serde::Deserializer<'a>, + { + let r = impl_serde::serialize::deserialize(de)?; + Decode::decode(&mut &r[..]) + .map_err(|e| serde::de::Error::custom(format!("Decode error: {e}"))) + } +} + +fn serialize_number>(val: &T, s: S) -> Result +where + S: serde::Serializer, +{ + let u256: U256 = (*val).into(); + serde::Serialize::serialize(&u256, s) +} + +fn deserialize_number<'a, D, T: TryFrom>(d: D) -> Result +where + D: serde::Deserializer<'a>, +{ + // At the time of writing, Smoldot gives back block numbers in numeric rather + // than hex format. So let's support deserializing from both here: + let number_or_hex = NumberOrHex::deserialize(d)?; + let u256 = number_or_hex.into_u256(); + TryFrom::try_from(u256).map_err(|_| serde::de::Error::custom("Try from failed")) +} + +/// A number type that can be serialized both as a number or a string that encodes a number in a +/// string. +/// +/// We allow two representations of the block number as input. Either we deserialize to the type +/// that is specified in the block type or we attempt to parse given hex value. +/// +/// The primary motivation for having this type is to avoid overflows when using big integers in +/// JavaScript (which we consider as an important RPC API consumer). +#[derive(Copy, Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] +#[serde(untagged)] +pub enum NumberOrHex { + /// The number represented directly. + Number(u64), + /// Hex representation of the number. 
+ Hex(U256), +} + +impl NumberOrHex { + /// Converts this number into an U256. + pub fn into_u256(self) -> U256 { + match self { + NumberOrHex::Number(n) => n.into(), + NumberOrHex::Hex(h) => h, + } + } +} + +impl From for U256 { + fn from(num_or_hex: NumberOrHex) -> U256 { + num_or_hex.into_u256() + } +} + +macro_rules! into_number_or_hex { + ($($t: ty)+) => { + $( + impl From<$t> for NumberOrHex { + fn from(x: $t) -> Self { + NumberOrHex::Number(x.into()) + } + } + )+ + } +} +into_number_or_hex!(u8 u16 u32 u64); + +impl From for NumberOrHex { + fn from(n: u128) -> Self { + NumberOrHex::Hex(n.into()) + } +} + +impl From for NumberOrHex { + fn from(n: U256) -> Self { + NumberOrHex::Hex(n) + } +} + +#[cfg(test)] +mod test { + use super::*; + + // Smoldot returns numeric block numbers in the header at the time of writing; + // ensure we can deserialize them properly. + #[test] + fn can_deserialize_numeric_block_number() { + let numeric_block_number_json = r#" + { + "digest": { + "logs": [] + }, + "extrinsicsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", + "number": 4, + "parentHash": "0xcb2690b2c85ceab55be03fc7f7f5f3857e7efeb7a020600ebd4331e10be2f7a5", + "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + "#; + + let header: SubstrateHeader = + serde_json::from_str(numeric_block_number_json).expect("valid block header"); + assert_eq!(header.number(), 4); + } + + // Substrate returns hex block numbers; ensure we can also deserialize those OK. + #[test] + fn can_deserialize_hex_block_number() { + let numeric_block_number_json = r#" + { + "digest": { + "logs": [] + }, + "extrinsicsRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", + "number": "0x04", + "parentHash": "0xcb2690b2c85ceab55be03fc7f7f5f3857e7efeb7a020600ebd4331e10be2f7a5", + "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + "#; + + let header: SubstrateHeader = + serde_json::from_str(numeric_block_number_json).expect("valid block header"); + assert_eq!(header.number(), 4); + } +} diff --git a/new/src/config/transaction_extensions.rs b/new/src/config/transaction_extensions.rs new file mode 100644 index 0000000000..2fe85aba25 --- /dev/null +++ b/new/src/config/transaction_extensions.rs @@ -0,0 +1,703 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! This module contains implementations for common transaction extensions, each +//! of which implements [`TransactionExtension`], and can be used in conjunction with +//! [`AnyOf`] to configure the set of transaction extensions which are known about +//! when interacting with a chain. + +use super::extrinsic_params::{ ExtrinsicParams, ClientState }; +use crate::config::ExtrinsicParamsEncoder; +use crate::config::{Config, HashFor}; +use crate::error::ExtrinsicParamsError; +use crate::utils::{Era, Static}; +use codec::{Compact, Encode}; +use core::any::Any; +use core::fmt::Debug; +use derive_where::derive_where; +use std::collections::HashMap; +use scale_decode::DecodeAsType; +use scale_info::PortableRegistry; + +// Re-export this here; it's a bit generically named to be re-exported from ::config. +pub use super::extrinsic_params::Params; + +/// A single [`TransactionExtension`] has a unique name, but is otherwise the +/// same as [`ExtrinsicParams`] in describing how to encode the extra and +/// additional data. 
+pub trait TransactionExtension: ExtrinsicParams { + /// The type representing the `extra` / value bytes of a transaction extension. + /// Decoding from this type should be symmetrical to the respective + /// `ExtrinsicParamsEncoder::encode_value_to()` implementation of this transaction extension. + type Decoded: DecodeAsType; + + /// This should return true if the transaction extension matches the details given. + /// Often, this will involve just checking that the identifier given matches that of the + /// extension in question. + fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool; +} + +/// The [`VerifySignature`] extension. For V5 General transactions, this is how a signature +/// is provided. The signature is constructed by signing a payload which contains the +/// transaction call data as well as the encoded "additional" bytes for any extensions _after_ +/// this one in the list. +pub struct VerifySignature(VerifySignatureDetails); + +impl ExtrinsicParams for VerifySignature { + type Params = (); + + fn new(_client: &ClientState, _params: Self::Params) -> Result { + Ok(VerifySignature(VerifySignatureDetails::Disabled)) + } +} + +impl ExtrinsicParamsEncoder for VerifySignature { + fn encode_value_to(&self, v: &mut Vec) { + self.0.encode_to(v); + } + fn encode_signer_payload_value_to(&self, v: &mut Vec) { + // This extension is never encoded to the signer payload, and extensions + // prior to this are ignored when creating said payload, so clear anything + // we've seen so far. + v.clear(); + } + fn encode_implicit_to(&self, v: &mut Vec) { + // We only use the "implicit" data for extensions _after_ this one + // in the pipeline to form the signer payload. Thus, clear anything + // we've seen so far. + v.clear(); + } + + fn inject_signature(&mut self, account: &dyn Any, signature: &dyn Any) { + // Downcast refs back to concrete types (we use `&dyn Any`` so that the trait remains object safe) + let account = account + .downcast_ref::() + .expect("A T::AccountId should have been provided") + .clone(); + let signature = signature + .downcast_ref::() + .expect("A T::Signature should have been provided") + .clone(); + + // The signature is not set through params, only here, once given by a user: + self.0 = VerifySignatureDetails::Signed { signature, account } + } +} + +impl TransactionExtension for VerifySignature { + type Decoded = Static>; + fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { + identifier == "VerifySignature" + } +} + +/// This allows a signature to be provided to the [`VerifySignature`] transaction extension. +// Dev note: this must encode identically to https://github.com/paritytech/polkadot-sdk/blob/fd72d58313c297a10600037ce1bb88ec958d722e/substrate/frame/verify-signature/src/extension.rs#L43 +#[derive(codec::Encode, codec::Decode)] +pub enum VerifySignatureDetails { + /// A signature has been provided. + Signed { + /// The signature. + signature: T::Signature, + /// The account that generated the signature. + account: T::AccountId, + }, + /// No signature was provided. + Disabled, +} + +/// The [`CheckMetadataHash`] transaction extension. +pub struct CheckMetadataHash { + // Eventually we might provide or calculate the metadata hash here, + // but for now we never provide a hash and so this is empty. 
+} + +impl ExtrinsicParams for CheckMetadataHash { + type Params = (); + + fn new(_client: &ClientState, _params: Self::Params) -> Result { + Ok(CheckMetadataHash {}) + } +} + +impl ExtrinsicParamsEncoder for CheckMetadataHash { + fn encode_value_to(&self, v: &mut Vec) { + // A single 0 byte in the TX payload indicates that the chain should + // _not_ expect any metadata hash to exist in the signer payload. + 0u8.encode_to(v); + } + fn encode_implicit_to(&self, v: &mut Vec) { + // We provide no metadata hash in the signer payload to align with the above. + None::<()>.encode_to(v); + } +} + +impl TransactionExtension for CheckMetadataHash { + type Decoded = CheckMetadataHashMode; + fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { + identifier == "CheckMetadataHash" + } +} + +/// Is metadata checking enabled or disabled? +// Dev note: The "Disabled" and "Enabled" variant names match those that the +// transaction extension will be encoded with, in order that DecodeAsType will work +// properly. +#[derive(Copy, Clone, Debug, DecodeAsType)] +pub enum CheckMetadataHashMode { + /// No hash was provided in the signer payload. + Disabled, + /// A hash was provided in the signer payload. + Enabled, +} + +impl CheckMetadataHashMode { + /// Is metadata checking enabled or disabled for this transaction? + pub fn is_enabled(&self) -> bool { + match self { + CheckMetadataHashMode::Disabled => false, + CheckMetadataHashMode::Enabled => true, + } + } +} + +/// The [`CheckSpecVersion`] transaction extension. +pub struct CheckSpecVersion(u32); + +impl ExtrinsicParams for CheckSpecVersion { + type Params = (); + + fn new(client: &ClientState, _params: Self::Params) -> Result { + Ok(CheckSpecVersion(client.runtime_version.spec_version)) + } +} + +impl ExtrinsicParamsEncoder for CheckSpecVersion { + fn encode_implicit_to(&self, v: &mut Vec) { + self.0.encode_to(v); + } +} + +impl TransactionExtension for CheckSpecVersion { + type Decoded = (); + fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { + identifier == "CheckSpecVersion" + } +} + +/// The [`CheckNonce`] transaction extension. +pub struct CheckNonce(u64); + +impl ExtrinsicParams for CheckNonce { + type Params = CheckNonceParams; + + fn new(_client: &ClientState, params: Self::Params) -> Result { + Ok(CheckNonce(params.0.unwrap_or(0))) + } +} + +impl ExtrinsicParamsEncoder for CheckNonce { + fn encode_value_to(&self, v: &mut Vec) { + Compact(self.0).encode_to(v); + } +} + +impl TransactionExtension for CheckNonce { + type Decoded = u64; + fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { + identifier == "CheckNonce" + } +} + +/// Configure the nonce used. +#[derive(Debug, Clone, Default)] +pub struct CheckNonceParams(Option); + +impl CheckNonceParams { + /// Retrieve the nonce from the chain and use that. + pub fn from_chain() -> Self { + Self(None) + } + /// Manually set an account nonce to use. + pub fn with_nonce(nonce: u64) -> Self { + Self(Some(nonce)) + } +} + +impl Params for CheckNonceParams { + fn inject_account_nonce(&mut self, nonce: u64) { + if self.0.is_none() { + self.0 = Some(nonce) + } + } +} + +/// The [`CheckTxVersion`] transaction extension. 
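The extensions above all follow the same pattern, so a chain-specific one can be added by implementing the same three traits. A minimal sketch, assuming it sits alongside these definitions (the `CheckExample` name and its single mode byte are invented for illustration, and the `Config` type parameters are written out as assumed from the trait definitions):

```rust
use codec::Encode;
use scale_info::PortableRegistry;

/// An illustrative extension that encodes a single `0` mode byte into the
/// transaction and contributes nothing to the implicit (signed) data.
pub struct CheckExample;

impl<T: Config> ExtrinsicParams<T> for CheckExample {
    type Params = ();

    fn new(_client: &ClientState<T>, _params: Self::Params) -> Result<Self, ExtrinsicParamsError> {
        Ok(CheckExample)
    }
}

impl ExtrinsicParamsEncoder for CheckExample {
    fn encode_value_to(&self, v: &mut Vec<u8>) {
        0u8.encode_to(v);
    }
    // No implicit data, so the default no-op `encode_implicit_to` is kept.
}

impl<T: Config> TransactionExtension<T> for CheckExample {
    // The mode byte decodes back as a u8.
    type Decoded = u8;

    fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool {
        identifier == "CheckExample"
    }
}
```

For the extension to be considered at all it also has to be part of the tuple handed to `AnyOf` (defined later in this file), mirroring how `DefaultExtrinsicParams` is assembled earlier in this patch; members that the chain's metadata does not declare are simply skipped:

```rust
type ExampleExtrinsicParams<T> = AnyOf<
    T,
    (
        CheckSpecVersion,
        CheckNonce,
        CheckGenesis<T>,
        CheckMortality<T>,
        ChargeTransactionPayment,
        CheckExample,
    ),
>;
```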
+pub struct CheckTxVersion(u32); + +impl ExtrinsicParams for CheckTxVersion { + type Params = (); + + fn new(client: &ClientState, _params: Self::Params) -> Result { + Ok(CheckTxVersion(client.runtime_version.transaction_version)) + } +} + +impl ExtrinsicParamsEncoder for CheckTxVersion { + fn encode_implicit_to(&self, v: &mut Vec) { + self.0.encode_to(v); + } +} + +impl TransactionExtension for CheckTxVersion { + type Decoded = (); + fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { + identifier == "CheckTxVersion" + } +} + +/// The [`CheckGenesis`] transaction extension. +pub struct CheckGenesis(HashFor); + +impl ExtrinsicParams for CheckGenesis { + type Params = (); + + fn new(client: &ClientState, _params: Self::Params) -> Result { + Ok(CheckGenesis(client.genesis_hash)) + } +} + +impl ExtrinsicParamsEncoder for CheckGenesis { + fn encode_implicit_to(&self, v: &mut Vec) { + self.0.encode_to(v); + } +} + +impl TransactionExtension for CheckGenesis { + type Decoded = (); + fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { + identifier == "CheckGenesis" + } +} + +/// The [`CheckMortality`] transaction extension. +pub struct CheckMortality { + params: CheckMortalityParamsInner, + genesis_hash: HashFor, +} + +impl ExtrinsicParams for CheckMortality { + type Params = CheckMortalityParams; + + fn new(client: &ClientState, params: Self::Params) -> Result { + // If a user has explicitly configured the transaction to be mortal for n blocks, but we get + // to this stage and no injected information was able to turn this into MortalFromBlock{..}, + // then we hit an error as we are unable to construct a mortal transaction here. + if matches!(¶ms.0, CheckMortalityParamsInner::MortalForBlocks(_)) { + return Err(ExtrinsicParamsError::custom( + "CheckMortality: We cannot construct an offline extrinsic with only the number of blocks it is mortal for. Use mortal_from_unchecked instead.", + )); + } + + Ok(CheckMortality { + // if nothing has been explicitly configured, we will have a mortal transaction + // valid for 32 blocks if block info is available. + params: params.0, + genesis_hash: client.genesis_hash, + }) + } +} + +impl ExtrinsicParamsEncoder for CheckMortality { + fn encode_value_to(&self, v: &mut Vec) { + match &self.params { + CheckMortalityParamsInner::MortalFromBlock { + for_n_blocks, + from_block_n, + .. + } => { + Era::mortal(*for_n_blocks, *from_block_n).encode_to(v); + } + _ => { + // Note: if we see `CheckMortalityInner::MortalForBlocks`, then it means the user has + // configured a block to be mortal for N blocks, but the current block was never injected, + // so we don't know where to start from and default back to building an immortal tx. + Era::Immortal.encode_to(v); + } + } + } + fn encode_implicit_to(&self, v: &mut Vec) { + match &self.params { + CheckMortalityParamsInner::MortalFromBlock { + from_block_hash, .. + } => { + from_block_hash.encode_to(v); + } + _ => { + self.genesis_hash.encode_to(v); + } + } + } +} + +impl TransactionExtension for CheckMortality { + type Decoded = Era; + fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { + identifier == "CheckMortality" + } +} + +/// Parameters to configure the [`CheckMortality`] transaction extension. +pub struct CheckMortalityParams(CheckMortalityParamsInner); + +enum CheckMortalityParamsInner { + /// The transaction will be immortal. + Immortal, + /// The transaction is mortal for N blocks. 
This must be "upgraded" into + /// [`CheckMortalityParamsInner::MortalFromBlock`] to ultimately work. + MortalForBlocks(u64), + /// The transaction is mortal for N blocks, but if it cannot be "upgraded", + /// then it will be set to immortal instead. This is the default if unset. + MortalForBlocksOrImmortalIfNotPossible(u64), + /// The transaction is mortal and all of the relevant information is provided. + MortalFromBlock { + for_n_blocks: u64, + from_block_n: u64, + from_block_hash: HashFor, + }, +} + +impl Default for CheckMortalityParams { + fn default() -> Self { + // default to being mortal for 32 blocks if possible, else immortal: + CheckMortalityParams(CheckMortalityParamsInner::MortalForBlocksOrImmortalIfNotPossible(32)) + } +} + +impl CheckMortalityParams { + /// Configure a transaction that will be mortal for the number of blocks given. + pub fn mortal(for_n_blocks: u64) -> Self { + Self(CheckMortalityParamsInner::MortalForBlocks(for_n_blocks)) + } + + /// Configure a transaction that will be mortal for the number of blocks given, + /// and from the block details provided. Prefer to use [`CheckMortalityParams::mortal()`] + /// where possible, which prevents the block number and hash from being misaligned. + pub fn mortal_from_unchecked( + for_n_blocks: u64, + from_block_n: u64, + from_block_hash: HashFor, + ) -> Self { + Self(CheckMortalityParamsInner::MortalFromBlock { + for_n_blocks, + from_block_n, + from_block_hash, + }) + } + /// An immortal transaction. + pub fn immortal() -> Self { + Self(CheckMortalityParamsInner::Immortal) + } +} + +impl Params for CheckMortalityParams { + fn inject_block(&mut self, from_block_n: u64, from_block_hash: HashFor) { + match &self.0 { + CheckMortalityParamsInner::MortalForBlocks(n) + | CheckMortalityParamsInner::MortalForBlocksOrImmortalIfNotPossible(n) => { + self.0 = CheckMortalityParamsInner::MortalFromBlock { + for_n_blocks: *n, + from_block_n, + from_block_hash, + } + } + _ => { + // Don't change anything if explicit Immortal or explicit block set. + } + } + } +} + +/// The [`ChargeAssetTxPayment`] transaction extension. +#[derive(DecodeAsType)] +#[derive_where(Clone, Debug; T::AssetId)] +#[decode_as_type(trait_bounds = "T::AssetId: DecodeAsType")] +pub struct ChargeAssetTxPayment { + tip: Compact, + asset_id: Option, +} + +impl ChargeAssetTxPayment { + /// Tip to the extrinsic author in the native chain token. + pub fn tip(&self) -> u128 { + self.tip.0 + } + + /// Tip to the extrinsic author using the asset ID given. + pub fn asset_id(&self) -> Option<&T::AssetId> { + self.asset_id.as_ref() + } +} + +impl ExtrinsicParams for ChargeAssetTxPayment { + type Params = ChargeAssetTxPaymentParams; + + fn new(_client: &ClientState, params: Self::Params) -> Result { + Ok(ChargeAssetTxPayment { + tip: Compact(params.tip), + asset_id: params.asset_id, + }) + } +} + +impl ExtrinsicParamsEncoder for ChargeAssetTxPayment { + fn encode_value_to(&self, v: &mut Vec) { + (self.tip, &self.asset_id).encode_to(v); + } +} + +impl TransactionExtension for ChargeAssetTxPayment { + type Decoded = Self; + fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { + identifier == "ChargeAssetTxPayment" + } +} + +/// Parameters to configure the [`ChargeAssetTxPayment`] transaction extension. 
+pub struct ChargeAssetTxPaymentParams { + tip: u128, + asset_id: Option, +} + +impl Default for ChargeAssetTxPaymentParams { + fn default() -> Self { + ChargeAssetTxPaymentParams { + tip: Default::default(), + asset_id: Default::default(), + } + } +} + +impl ChargeAssetTxPaymentParams { + /// Don't provide a tip to the extrinsic author. + pub fn no_tip() -> Self { + ChargeAssetTxPaymentParams { + tip: 0, + asset_id: None, + } + } + /// Tip the extrinsic author in the native chain token. + pub fn tip(tip: u128) -> Self { + ChargeAssetTxPaymentParams { + tip, + asset_id: None, + } + } + /// Tip the extrinsic author using the asset ID given. + pub fn tip_of(tip: u128, asset_id: T::AssetId) -> Self { + ChargeAssetTxPaymentParams { + tip, + asset_id: Some(asset_id), + } + } +} + +impl Params for ChargeAssetTxPaymentParams {} + +/// The [`ChargeTransactionPayment`] transaction extension. +#[derive(Clone, Debug, DecodeAsType)] +pub struct ChargeTransactionPayment { + tip: Compact, +} + +impl ChargeTransactionPayment { + /// Tip to the extrinsic author in the native chain token. + pub fn tip(&self) -> u128 { + self.tip.0 + } +} + +impl ExtrinsicParams for ChargeTransactionPayment { + type Params = ChargeTransactionPaymentParams; + + fn new(_client: &ClientState, params: Self::Params) -> Result { + Ok(ChargeTransactionPayment { + tip: Compact(params.tip), + }) + } +} + +impl ExtrinsicParamsEncoder for ChargeTransactionPayment { + fn encode_value_to(&self, v: &mut Vec) { + self.tip.encode_to(v); + } +} + +impl TransactionExtension for ChargeTransactionPayment { + type Decoded = Self; + fn matches(identifier: &str, _type_id: u32, _types: &PortableRegistry) -> bool { + identifier == "ChargeTransactionPayment" + } +} + +/// Parameters to configure the [`ChargeTransactionPayment`] transaction extension. +#[derive(Default)] +pub struct ChargeTransactionPaymentParams { + tip: u128, +} + +impl ChargeTransactionPaymentParams { + /// Don't provide a tip to the extrinsic author. + pub fn no_tip() -> Self { + ChargeTransactionPaymentParams { tip: 0 } + } + /// Tip the extrinsic author in the native chain token. + pub fn tip(tip: u128) -> Self { + ChargeTransactionPaymentParams { tip } + } +} + +impl Params for ChargeTransactionPaymentParams {} + +/// This accepts a tuple of [`TransactionExtension`]s, and will dynamically make use of whichever +/// ones are actually required for the chain in the correct order, ignoring the rest. This +/// is a sensible default, and allows for a single configuration to work across multiple chains. +pub struct AnyOf { + params: Vec>, + _marker: core::marker::PhantomData<(T, Params)>, +} + +macro_rules! impl_tuples { + ($($ident:ident $index:tt),+) => { + // We do some magic when the tuple is wrapped in AnyOf. We + // look at the metadata, and use this to select and make use of only the extensions + // that we actually need for the chain we're dealing with. + impl ExtrinsicParams for AnyOf + where + T: Config, + $($ident: TransactionExtension,)+ + { + type Params = ($($ident::Params,)+); + + fn new( + client: &ClientState, + params: Self::Params, + ) -> Result { + let metadata = &client.metadata; + let types = metadata.types(); + + // For each transaction extension in the tuple, find the matching index in the metadata, if + // there is one, and add it to a map with that index as the key. 
+ let mut exts_by_index = HashMap::new(); + $({ + for (idx, e) in metadata.extrinsic().transaction_extensions_to_use_for_encoding().enumerate() { + // Skip over any exts that have a match already: + if exts_by_index.contains_key(&idx) { + continue + } + // Break and record as soon as we find a match: + if $ident::matches(e.identifier(), e.extra_ty(), types) { + let ext = $ident::new(client, params.$index)?; + let boxed_ext: Box = Box::new(ext); + exts_by_index.insert(idx, boxed_ext); + break + } + } + })+ + + // Next, turn these into an ordered vec, erroring if we haven't matched on any exts yet. + let mut params = Vec::new(); + for (idx, e) in metadata.extrinsic().transaction_extensions_to_use_for_encoding().enumerate() { + let Some(ext) = exts_by_index.remove(&idx) else { + if is_type_empty(e.extra_ty(), types) { + continue + } else { + return Err(ExtrinsicParamsError::UnknownTransactionExtension(e.identifier().to_owned())); + } + }; + params.push(ext); + } + + Ok(AnyOf { + params, + _marker: core::marker::PhantomData + }) + } + } + + impl ExtrinsicParamsEncoder for AnyOf + where + T: Config, + $($ident: TransactionExtension,)+ + { + fn encode_value_to(&self, v: &mut Vec) { + for ext in &self.params { + ext.encode_value_to(v); + } + } + fn encode_signer_payload_value_to(&self, v: &mut Vec) { + for ext in &self.params { + ext.encode_signer_payload_value_to(v); + } + } + fn encode_implicit_to(&self, v: &mut Vec) { + for ext in &self.params { + ext.encode_implicit_to(v); + } + } + fn inject_signature(&mut self, account_id: &dyn Any, signature: &dyn Any) { + for ext in &mut self.params { + ext.inject_signature(account_id, signature); + } + } + } + } +} + +#[rustfmt::skip] +const _: () = { + impl_tuples!(A 0); + impl_tuples!(A 0, B 1); + impl_tuples!(A 0, B 1, C 2); + impl_tuples!(A 0, B 1, C 2, D 3); + impl_tuples!(A 0, B 1, C 2, D 3, E 4); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, U 19); + impl_tuples!(A 0, B 1, C 2, D 3, E 4, F 5, G 6, H 7, I 8, J 9, K 10, L 11, M 12, N 13, O 14, P 15, Q 16, R 17, S 18, U 19, V 20); +}; + +/// Checks to see whether the type being given is empty, ie would require +/// 0 bytes to encode. +fn is_type_empty(type_id: u32, types: &scale_info::PortableRegistry) -> bool { + let Some(ty) = types.resolve(type_id) else { + // Can't resolve; type may not be empty. 
Not expected to hit this. + return false; + }; + + use scale_info::TypeDef; + match &ty.type_def { + TypeDef::Composite(c) => c.fields.iter().all(|f| is_type_empty(f.ty.id, types)), + TypeDef::Array(a) => a.len == 0 || is_type_empty(a.type_param.id, types), + TypeDef::Tuple(t) => t.fields.iter().all(|f| is_type_empty(f.id, types)), + // Explicitly list these in case any additions are made in the future. + TypeDef::BitSequence(_) + | TypeDef::Variant(_) + | TypeDef::Sequence(_) + | TypeDef::Compact(_) + | TypeDef::Primitive(_) => false, + } +} diff --git a/new/src/error.rs b/new/src/error.rs new file mode 100644 index 0000000000..eec82db3f9 --- /dev/null +++ b/new/src/error.rs @@ -0,0 +1,933 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Types representing the errors that can be returned. + +mod dispatch_error; +mod hex; + +use thiserror::Error as DeriveError; + +#[cfg(feature = "unstable-light-client")] +pub use subxt_lightclient::LightClientError; + +// Re-export dispatch error types: +pub use dispatch_error::{ + ArithmeticError, DispatchError, ModuleError, TokenError, TransactionalError, +}; + +// Re-expose the errors we use from other crates here: +pub use subxt_metadata::Metadata; +pub use hex::Hex; +pub use scale_decode::Error as DecodeError; +pub use scale_encode::Error as EncodeError; +pub use subxt_metadata::TryFromError as MetadataTryFromError; + +/// A global error type. Any of the errors exposed here can convert into this +/// error via `.into()`, but this error isn't itself exposed from anything. +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum Error { + #[error(transparent)] + ExtrinsicDecodeErrorAt(#[from] ExtrinsicDecodeErrorAt), + #[error(transparent)] + ConstantError(#[from] ConstantError), + #[error(transparent)] + CustomValueError(#[from] CustomValueError), + #[error(transparent)] + StorageKeyError(#[from] StorageKeyError), + #[error(transparent)] + StorageValueError(#[from] StorageValueError), + #[error(transparent)] + BackendError(#[from] BackendError), + #[error(transparent)] + BlockError(#[from] BlockError), + #[error(transparent)] + AccountNonceError(#[from] AccountNonceError), + #[error(transparent)] + OnlineClientError(#[from] OnlineClientError), + #[error(transparent)] + RuntimeUpdaterError(#[from] RuntimeUpdaterError), + #[error(transparent)] + RuntimeUpdateeApplyError(#[from] RuntimeUpdateeApplyError), + #[error(transparent)] + RuntimeApiError(#[from] RuntimeApiError), + #[error(transparent)] + EventsError(#[from] EventsError), + #[error(transparent)] + ExtrinsicError(#[from] ExtrinsicError), + #[error(transparent)] + ViewFunctionError(#[from] ViewFunctionError), + #[error(transparent)] + TransactionProgressError(#[from] TransactionProgressError), + #[error(transparent)] + TransactionStatusError(#[from] TransactionStatusError), + #[error(transparent)] + TransactionEventsError(#[from] TransactionEventsError), + #[error(transparent)] + TransactionFinalizedSuccessError(#[from] TransactionFinalizedSuccessError), + #[error(transparent)] + ModuleErrorDetailsError(#[from] ModuleErrorDetailsError), + #[error(transparent)] + ModuleErrorDecodeError(#[from] ModuleErrorDecodeError), + #[error(transparent)] + DispatchErrorDecodeError(#[from] DispatchErrorDecodeError), + #[error(transparent)] + StorageError(#[from] StorageError), + // Dev note: Subxt doesn't directly return Raw* errors. 
These exist so that when + // users use common crates (like parity-scale-codec and subxt-rpcs), errors returned + // there can be handled automatically using ? when the expected error is subxt::Error. + #[error("Other RPC client error: {0}")] + OtherRpcClientError(#[from] subxt_rpcs::Error), + #[error("Other codec error: {0}")] + OtherCodecError(#[from] codec::Error), + #[cfg(feature = "unstable-light-client")] + #[error("Other light client error: {0}")] + OtherLightClientError(#[from] subxt_lightclient::LightClientError), + #[cfg(feature = "unstable-light-client")] + #[error("Other light client RPC error: {0}")] + OtherLightClientRpcError(#[from] subxt_lightclient::LightClientRpcError), + // Dev note: Nothing in subxt should ever emit this error. It can instead be used + // to easily map other errors into a subxt::Error for convenience. Some From impls + // make this automatic for common "other" error types. + #[error("Other error: {0}")] + Other(Box), +} + +impl From for Error { + fn from(value: std::convert::Infallible) -> Self { + match value {} + } +} + +impl Error { + /// Create a generic error. This is a quick workaround when you are using + /// [`Error`] and have a non-Subxt error to return. + pub fn other(error: E) -> Error { + Error::Other(Box::new(error)) + } + + /// Create a generic error from a string. This is a quick workaround when you are using + /// [`Error`] and have a non-Subxt error to return. + pub fn other_str(error: impl Into) -> Error { + #[derive(thiserror::Error, Debug, Clone)] + #[error("{0}")] + struct StrError(String); + Error::Other(Box::new(StrError(error.into()))) + } + + /// Checks whether the error was caused by a RPC re-connection. + pub fn is_disconnected_will_reconnect(&self) -> bool { + matches!( + self.backend_error(), + Some(BackendError::Rpc(RpcError::ClientError( + subxt_rpcs::Error::DisconnectedWillReconnect(_) + ))) + ) + } + + /// Checks whether the error was caused by a RPC request being rejected. 
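+    ///
+    /// Illustrative usage (`submit_something` is a hypothetical call returning
+    /// a `Result` whose error type is this crate's `Error`):
+    ///
+    /// ```rust,ignore
+    /// match submit_something().await {
+    ///     Ok(value) => println!("done: {value:?}"),
+    ///     Err(e) if e.is_rpc_limit_reached() => {
+    ///         // The server rejected the request because a limit was reached;
+    ///         // back off and retry later.
+    ///     }
+    ///     Err(e) => return Err(e),
+    /// }
+    /// ```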
+ pub fn is_rpc_limit_reached(&self) -> bool { + matches!( + self.backend_error(), + Some(BackendError::Rpc(RpcError::LimitReached)) + ) + } + + fn backend_error(&self) -> Option<&BackendError> { + match self { + Error::BlockError(e) => e.backend_error(), + Error::AccountNonceError(e) => e.backend_error(), + Error::OnlineClientError(e) => e.backend_error(), + Error::RuntimeUpdaterError(e) => e.backend_error(), + Error::RuntimeApiError(e) => e.backend_error(), + Error::EventsError(e) => e.backend_error(), + Error::ExtrinsicError(e) => e.backend_error(), + Error::ViewFunctionError(e) => e.backend_error(), + Error::TransactionProgressError(e) => e.backend_error(), + Error::TransactionEventsError(e) => e.backend_error(), + Error::TransactionFinalizedSuccessError(e) => e.backend_error(), + Error::StorageError(e) => e.backend_error(), + // Any errors that **don't** return a BackendError anywhere will return None: + _ => None, + } + } +} + +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum BackendError { + #[error("Backend error: RPC error: {0}")] + Rpc(#[from] RpcError), + #[error("Backend error: Could not find metadata version {0}")] + MetadataVersionNotFound(u32), + #[error("Backend error: Could not codec::Decode Runtime API response: {0}")] + CouldNotScaleDecodeRuntimeResponse(codec::Error), + #[error("Backend error: Could not codec::Decode metadata bytes into subxt::Metadata: {0}")] + CouldNotDecodeMetadata(codec::Error), + // This is for errors in `Backend` implementations which aren't any of the "pre-defined" set above: + #[error("Custom backend error: {0}")] + Other(String), +} + +impl BackendError { + /// Checks whether the error was caused by a RPC re-connection. + pub fn is_disconnected_will_reconnect(&self) -> bool { + matches!( + self, + BackendError::Rpc(RpcError::ClientError( + subxt_rpcs::Error::DisconnectedWillReconnect(_) + )) + ) + } + + /// Checks whether the error was caused by a RPC request being rejected. + pub fn is_rpc_limit_reached(&self) -> bool { + matches!(self, BackendError::Rpc(RpcError::LimitReached)) + } +} + +impl From for BackendError { + fn from(value: subxt_rpcs::Error) -> Self { + BackendError::Rpc(RpcError::ClientError(value)) + } +} + +/// An RPC error. Since we are generic over the RPC client that is used, +/// the error is boxed and could be casted. +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +pub enum RpcError { + /// Error related to the RPC client. + #[error("RPC error: {0}")] + ClientError(#[from] subxt_rpcs::Error), + /// This error signals that we got back a [`subxt_rpcs::methods::chain_head::MethodResponse::LimitReached`], + /// which is not technically an RPC error but is treated as an error in our own APIs. + #[error("RPC error: limit reached")] + LimitReached, + /// The RPC subscription was dropped. 
+ #[error("RPC error: subscription dropped.")] + SubscriptionDropped, +} + +/// Block error +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum BlockError { + #[error( + "Could not find the block body with hash {block_hash} (perhaps it was on a non-finalized fork?)" + )] + BlockNotFound { block_hash: Hex }, + #[error("Could not download the block header with hash {block_hash}: {reason}")] + CouldNotGetBlockHeader { + block_hash: Hex, + reason: BackendError, + }, + #[error("Could not download the latest block header: {0}")] + CouldNotGetLatestBlock(BackendError), + #[error("Could not subscribe to all blocks: {0}")] + CouldNotSubscribeToAllBlocks(BackendError), + #[error("Could not subscribe to best blocks: {0}")] + CouldNotSubscribeToBestBlocks(BackendError), + #[error("Could not subscribe to finalized blocks: {0}")] + CouldNotSubscribeToFinalizedBlocks(BackendError), + #[error("Error getting account nonce at block {block_hash}")] + AccountNonceError { + block_hash: Hex, + account_id: Hex, + reason: AccountNonceError, + }, +} + +impl BlockError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + BlockError::CouldNotGetBlockHeader { reason: e, .. } + | BlockError::CouldNotGetLatestBlock(e) + | BlockError::CouldNotSubscribeToAllBlocks(e) + | BlockError::CouldNotSubscribeToBestBlocks(e) + | BlockError::CouldNotSubscribeToFinalizedBlocks(e) => Some(e), + _ => None, + } + } +} + +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum AccountNonceError { + #[error("Could not retrieve account nonce: {0}")] + CouldNotRetrieve(#[from] BackendError), + #[error("Could not decode account nonce: {0}")] + CouldNotDecode(#[from] codec::Error), + #[error("Wrong number of account nonce bytes returned: {0} (expected 2, 4 or 8)")] + WrongNumberOfBytes(usize), +} + +impl AccountNonceError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + AccountNonceError::CouldNotRetrieve(e) => Some(e), + _ => None, + } + } +} + +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum OnlineClientError { + #[error("Cannot construct OnlineClient: {0}")] + RpcError(#[from] subxt_rpcs::Error), + #[error( + "Cannot construct OnlineClient: Cannot fetch latest finalized block to obtain init details from: {0}" + )] + CannotGetLatestFinalizedBlock(BackendError), + #[error("Cannot construct OnlineClient: Cannot fetch genesis hash: {0}")] + CannotGetGenesisHash(BackendError), + #[error("Cannot construct OnlineClient: Cannot fetch current runtime version: {0}")] + CannotGetCurrentRuntimeVersion(BackendError), + #[error("Cannot construct OnlineClient: Cannot fetch metadata: {0}")] + CannotFetchMetadata(BackendError), +} + +impl OnlineClientError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + OnlineClientError::CannotGetLatestFinalizedBlock(e) + | OnlineClientError::CannotGetGenesisHash(e) + | OnlineClientError::CannotGetCurrentRuntimeVersion(e) + | OnlineClientError::CannotFetchMetadata(e) => Some(e), + _ => None, + } + } +} + +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum RuntimeUpdaterError { + #[error("Error subscribing to runtime updates: The update stream ended unexpectedly")] + UnexpectedEndOfUpdateStream, + #[error("Error subscribing to runtime updates: The finalized block stream ended unexpectedly")] + UnexpectedEndOfBlockStream, + #[error("Error subscribing to runtime updates: Can't stream runtime version: {0}")] + 
CannotStreamRuntimeVersion(BackendError), + #[error("Error subscribing to runtime updates: Can't get next runtime version in stream: {0}")] + CannotGetNextRuntimeVersion(BackendError), + #[error("Error subscribing to runtime updates: Cannot stream finalized blocks: {0}")] + CannotStreamFinalizedBlocks(BackendError), + #[error("Error subscribing to runtime updates: Cannot get next finalized block in stream: {0}")] + CannotGetNextFinalizedBlock(BackendError), + #[error("Cannot fetch new metadata for runtime update: {0}")] + CannotFetchNewMetadata(BackendError), + #[error( + "Error subscribing to runtime updates: Cannot find the System.LastRuntimeUpgrade storage entry" + )] + CantFindSystemLastRuntimeUpgrade, + #[error("Error subscribing to runtime updates: Cannot fetch last runtime upgrade: {0}")] + CantFetchLastRuntimeUpgrade(StorageError), + #[error("Error subscribing to runtime updates: Cannot decode last runtime upgrade: {0}")] + CannotDecodeLastRuntimeUpgrade(StorageValueError), +} + +impl RuntimeUpdaterError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + RuntimeUpdaterError::CannotStreamRuntimeVersion(e) + | RuntimeUpdaterError::CannotGetNextRuntimeVersion(e) + | RuntimeUpdaterError::CannotStreamFinalizedBlocks(e) + | RuntimeUpdaterError::CannotGetNextFinalizedBlock(e) + | RuntimeUpdaterError::CannotFetchNewMetadata(e) => Some(e), + _ => None, + } + } +} + +/// Error that can occur during upgrade. +#[non_exhaustive] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum RuntimeUpdateeApplyError { + #[error("The proposed runtime update is the same as the current version")] + SameVersion, +} + +/// Error working with Runtime APIs +#[non_exhaustive] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum RuntimeApiError { + #[error("The static Runtime API address used is not compatible with the live chain")] + IncompatibleCodegen, + #[error("Runtime API trait not found: {0}")] + TraitNotFound(String), + #[error("Runtime API method {method_name} not found in trait {trait_name}")] + MethodNotFound { + trait_name: String, + method_name: String, + }, + #[error("Failed to encode Runtime API inputs: {0}")] + CouldNotEncodeInputs(frame_decode::runtime_apis::RuntimeApiInputsEncodeError), + #[error("Failed to decode Runtime API: {0}")] + CouldNotDecodeResponse(frame_decode::runtime_apis::RuntimeApiDecodeError), + #[error("Cannot access Runtime APIs at latest block: Cannot fetch latest finalized block: {0}")] + CannotGetLatestFinalizedBlock(BackendError), + #[error("Cannot call the Runtime API: {0}")] + CannotCallApi(BackendError), +} + +impl RuntimeApiError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + RuntimeApiError::CannotGetLatestFinalizedBlock(e) + | RuntimeApiError::CannotCallApi(e) => Some(e), + _ => None, + } + } +} + +/// Error working with events. 
+#[non_exhaustive] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum EventsError { + #[error("Can't decode event: can't decode phase: {0}")] + CannotDecodePhase(codec::Error), + #[error("Can't decode event: can't decode pallet index: {0}")] + CannotDecodePalletIndex(codec::Error), + #[error("Can't decode event: can't decode variant index: {0}")] + CannotDecodeVariantIndex(codec::Error), + #[error("Can't decode event: can't find pallet with index {0}")] + CannotFindPalletWithIndex(u8), + #[error( + "Can't decode event: can't find variant with index {variant_index} in pallet {pallet_name}" + )] + CannotFindVariantWithIndex { + pallet_name: String, + variant_index: u8, + }, + #[error("Can't decode field {field_name:?} in event {pallet_name}.{event_name}: {reason}")] + CannotDecodeFieldInEvent { + pallet_name: String, + event_name: String, + field_name: String, + reason: scale_decode::visitor::DecodeError, + }, + #[error("Can't decode event topics: {0}")] + CannotDecodeEventTopics(codec::Error), + #[error("Can't decode the fields of event {pallet_name}.{event_name}: {reason}")] + CannotDecodeEventFields { + pallet_name: String, + event_name: String, + reason: scale_decode::Error, + }, + #[error("Can't decode event {pallet_name}.{event_name} to Event enum: {reason}")] + CannotDecodeEventEnum { + pallet_name: String, + event_name: String, + reason: scale_decode::Error, + }, + #[error("Cannot access events at latest block: Cannot fetch latest finalized block: {0}")] + CannotGetLatestFinalizedBlock(BackendError), + #[error("Cannot fetch event bytes: {0}")] + CannotFetchEventBytes(BackendError), +} + +impl EventsError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + EventsError::CannotGetLatestFinalizedBlock(e) + | EventsError::CannotFetchEventBytes(e) => Some(e), + _ => None, + } + } +} + +/// Error working with extrinsics. +#[non_exhaustive] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum ExtrinsicError { + #[error("The extrinsic payload is not compatible with the live chain")] + IncompatibleCodegen, + #[error("Can't find extrinsic: pallet with name {0} not found")] + PalletNameNotFound(String), + #[error("Can't find extrinsic: call name {call_name} doesn't exist in pallet {pallet_name}")] + CallNameNotFound { + pallet_name: String, + call_name: String, + }, + #[error("Can't encode the extrinsic call data: {0}")] + CannotEncodeCallData(scale_encode::Error), + #[error("Subxt does not support the extrinsic versions expected by the chain")] + UnsupportedVersion, + #[error("Cannot construct the required transaction extensions: {0}")] + Params(#[from] ExtrinsicParamsError), + #[error("Cannot decode transaction extension '{name}': {error}")] + CouldNotDecodeTransactionExtension { + /// The extension name. + name: String, + /// The decode error. + error: scale_decode::Error, + }, + #[error( + "After decoding the extrinsic at index {extrinsic_index}, {num_leftover_bytes} bytes were left, suggesting that decoding may have failed" + )] + LeftoverBytes { + /// Index of the extrinsic that failed to decode. + extrinsic_index: usize, + /// Number of bytes leftover after decoding the extrinsic. + num_leftover_bytes: usize, + }, + #[error("{0}")] + ExtrinsicDecodeErrorAt(#[from] ExtrinsicDecodeErrorAt), + #[error("Failed to decode the fields of an extrinsic at index {extrinsic_index}: {error}")] + CannotDecodeFields { + /// Index of the extrinsic whose fields we could not decode + extrinsic_index: usize, + /// The decode error. 
+ error: scale_decode::Error, + }, + #[error("Failed to decode the extrinsic at index {extrinsic_index} to a root enum: {error}")] + CannotDecodeIntoRootExtrinsic { + /// Index of the extrinsic that we failed to decode + extrinsic_index: usize, + /// The decode error. + error: scale_decode::Error, + }, + #[error("Could not download block body to extract extrinsics from: {0}")] + CannotGetBlockBody(BackendError), + #[error("Block not found: {0}")] + BlockNotFound(Hex), + #[error( + "Extrinsic submission error: Cannot get latest finalized block to grab account nonce at: {0}" + )] + CannotGetLatestFinalizedBlock(BackendError), + #[error("Cannot find block header for block {block_hash}")] + CannotFindBlockHeader { block_hash: Hex }, + #[error("Error getting account nonce at block {block_hash}")] + AccountNonceError { + block_hash: Hex, + account_id: Hex, + reason: AccountNonceError, + }, + #[error("Cannot submit extrinsic: {0}")] + ErrorSubmittingTransaction(BackendError), + #[error("A transaction status error was returned while submitting the extrinsic: {0}")] + TransactionStatusError(TransactionStatusError), + #[error( + "The transaction status stream encountered an error while submitting the extrinsic: {0}" + )] + TransactionStatusStreamError(BackendError), + #[error( + "The transaction status stream unexpectedly ended, so we don't know the status of the submitted extrinsic" + )] + UnexpectedEndOfTransactionStatusStream, + #[error("Cannot get fee info from Runtime API: {0}")] + CannotGetFeeInfo(BackendError), + #[error("Cannot get validation info from Runtime API: {0}")] + CannotGetValidationInfo(BackendError), + #[error("Cannot decode ValidationResult bytes: {0}")] + CannotDecodeValidationResult(codec::Error), + #[error("ValidationResult bytes could not be decoded")] + UnexpectedValidationResultBytes(Vec), +} + +impl ExtrinsicError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + ExtrinsicError::CannotGetBlockBody(e) + | ExtrinsicError::CannotGetLatestFinalizedBlock(e) + | ExtrinsicError::ErrorSubmittingTransaction(e) + | ExtrinsicError::TransactionStatusStreamError(e) + | ExtrinsicError::CannotGetFeeInfo(e) + | ExtrinsicError::CannotGetValidationInfo(e) => Some(e), + ExtrinsicError::AccountNonceError { reason, .. } => reason.backend_error(), + _ => None, + } + } +} + +#[derive(Debug, DeriveError)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum CustomValueError { + #[error("The static custom value address used is not compatible with the live chain")] + IncompatibleCodegen, + #[error("The custom value '{0}' was not found")] + NotFound(String), + #[error("Failed to decode custom value: {0}")] + CouldNotDecodeCustomValue(frame_decode::custom_values::CustomValueDecodeError), +} + +/// Error working with View Functions. 
+#[non_exhaustive] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum ViewFunctionError { + #[error("The static View Function address used is not compatible with the live chain")] + IncompatibleCodegen, + #[error("Can't find View Function: pallet {0} not found")] + PalletNotFound(String), + #[error("Can't find View Function {function_name} in pallet {pallet_name}")] + ViewFunctionNotFound { + pallet_name: String, + function_name: String, + }, + #[error("Failed to encode View Function inputs: {0}")] + CouldNotEncodeInputs(frame_decode::view_functions::ViewFunctionInputsEncodeError), + #[error("Failed to decode View Function: {0}")] + CouldNotDecodeResponse(frame_decode::view_functions::ViewFunctionDecodeError), + #[error( + "Cannot access View Functions at latest block: Cannot fetch latest finalized block: {0}" + )] + CannotGetLatestFinalizedBlock(BackendError), + #[error("Cannot call the View Function Runtime API: {0}")] + CannotCallApi(BackendError), +} + +impl ViewFunctionError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + ViewFunctionError::CannotGetLatestFinalizedBlock(e) + | ViewFunctionError::CannotCallApi(e) => Some(e), + _ => None, + } + } +} + +/// Error during the transaction progress. +#[non_exhaustive] +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum TransactionProgressError { + #[error("Cannot get the next transaction progress update: {0}")] + CannotGetNextProgressUpdate(BackendError), + #[error("Error during transaction progress: {0}")] + TransactionStatusError(#[from] TransactionStatusError), + #[error( + "The transaction status stream unexpectedly ended, so we have no further transaction progress updates" + )] + UnexpectedEndOfTransactionStatusStream, +} + +impl TransactionProgressError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + TransactionProgressError::CannotGetNextProgressUpdate(e) => Some(e), + TransactionProgressError::TransactionStatusError(_) => None, + TransactionProgressError::UnexpectedEndOfTransactionStatusStream => None, + } + } +} + +/// An error emitted as the result of a transaction progress update. +#[derive(Clone, Debug, Eq, thiserror::Error, PartialEq)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum TransactionStatusError { + /// An error happened on the node that the transaction was submitted to. + #[error("Error handling transaction: {0}")] + Error(String), + /// The transaction was deemed invalid. + #[error("The transaction is not valid: {0}")] + Invalid(String), + /// The transaction was dropped. + #[error("The transaction was dropped: {0}")] + Dropped(String), +} + +/// Error fetching events for a just-submitted transaction +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum TransactionEventsError { + #[error( + "The block containing the submitted transaction ({block_hash}) could not be downloaded: {error}" + )] + CannotFetchBlockBody { + block_hash: Hex, + error: BackendError, + }, + #[error( + "Cannot find the the submitted transaction (hash: {transaction_hash}) in the block (hash: {block_hash}) it is supposed to be in." 
+ )] + CannotFindTransactionInBlock { + block_hash: Hex, + transaction_hash: Hex, + }, + #[error("The block containing the submitted transaction ({block_hash}) could not be found")] + BlockNotFound { block_hash: Hex }, + #[error( + "Could not decode event at index {event_index} for the submitted transaction at block {block_hash}: {error}" + )] + CannotDecodeEventInBlock { + event_index: usize, + block_hash: Hex, + error: EventsError, + }, + #[error("Could not fetch events for the submitted transaction: {error}")] + CannotFetchEventsForTransaction { + block_hash: Hex, + transaction_hash: Hex, + error: EventsError, + }, + #[error("The transaction led to a DispatchError, but we failed to decode it: {error}")] + CannotDecodeDispatchError { + error: DispatchErrorDecodeError, + bytes: Vec, + }, + #[error("The transaction failed with the following dispatch error: {0}")] + ExtrinsicFailed(#[from] DispatchError), +} + +impl TransactionEventsError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + TransactionEventsError::CannotFetchBlockBody { error, .. } => Some(error), + TransactionEventsError::CannotDecodeEventInBlock { error, .. } + | TransactionEventsError::CannotFetchEventsForTransaction { error, .. } => { + error.backend_error() + } + _ => None, + } + } +} + +/// Error waiting for the transaction to be finalized and successful. +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs, clippy::large_enum_variant)] +pub enum TransactionFinalizedSuccessError { + #[error("Could not finalize the transaction: {0}")] + FinalizationError(#[from] TransactionProgressError), + #[error("The transaction did not succeed: {0}")] + SuccessError(#[from] TransactionEventsError), +} + +impl TransactionFinalizedSuccessError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + TransactionFinalizedSuccessError::FinalizationError(e) => e.backend_error(), + TransactionFinalizedSuccessError::SuccessError(e) => e.backend_error(), + } + } +} + +/// Error decoding the [`DispatchError`] +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum ModuleErrorDetailsError { + #[error( + "Could not get details for the DispatchError: could not find pallet index {pallet_index}" + )] + PalletNotFound { pallet_index: u8 }, + #[error( + "Could not get details for the DispatchError: could not find error index {error_index} in pallet {pallet_name}" + )] + ErrorVariantNotFound { + pallet_name: String, + error_index: u8, + }, +} + +/// Error decoding the [`ModuleError`] +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +#[error("Could not decode the DispatchError::Module payload into the given type: {0}")] +pub struct ModuleErrorDecodeError(scale_decode::Error); + +/// Error decoding the [`DispatchError`] +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum DispatchErrorDecodeError { + #[error( + "Could not decode the DispatchError: could not find the corresponding type ID in the metadata" + )] + DispatchErrorTypeIdNotFound, + #[error("Could not decode the DispatchError: {0}")] + CouldNotDecodeDispatchError(scale_decode::Error), + #[error("Could not decode the DispatchError::Module variant")] + CouldNotDecodeModuleError { + /// The bytes corresponding to the Module variant we were unable to decode: + bytes: Vec, + }, +} + +/// Error working with storage. 
+#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum StorageError { + #[error("The static storage address used is not compatible with the live chain")] + IncompatibleCodegen, + #[error("Can't find storage value: pallet with name {0} not found")] + PalletNameNotFound(String), + #[error( + "Storage entry '{entry_name}' not found in pallet {pallet_name} in the live chain metadata" + )] + StorageEntryNotFound { + pallet_name: String, + entry_name: String, + }, + #[error("Cannot obtain storage information from metadata: {0}")] + StorageInfoError(frame_decode::storage::StorageInfoError<'static>), + #[error("Cannot encode storage key: {0}")] + StorageKeyEncodeError(frame_decode::storage::StorageKeyEncodeError), + #[error("Cannot create a key to iterate over a plain entry")] + CannotIterPlainEntry { + pallet_name: String, + entry_name: String, + }, + #[error( + "Wrong number of key parts provided to iterate a storage address. We expected at most {max_expected} key parts but got {got} key parts" + )] + WrongNumberOfKeyPartsProvidedForIterating { max_expected: usize, got: usize }, + #[error( + "Wrong number of key parts provided to fetch a storage address. We expected {expected} key parts but got {got} key parts" + )] + WrongNumberOfKeyPartsProvidedForFetching { expected: usize, got: usize }, + #[error("Cannot access storage at latest block: Cannot fetch latest finalized block: {0}")] + CannotGetLatestFinalizedBlock(BackendError), + #[error( + "No storage value found at the given address, and no default value to fall back to using." + )] + NoValueFound, + #[error("Cannot fetch the storage value: {0}")] + CannotFetchValue(BackendError), + #[error("Cannot iterate storage values: {0}")] + CannotIterateValues(BackendError), + #[error("Encountered an error iterating over storage values: {0}")] + StreamFailure(BackendError), + #[error("Cannot decode the storage version for a given entry: {0}")] + CannotDecodeStorageVersion(codec::Error), +} + +impl StorageError { + fn backend_error(&self) -> Option<&BackendError> { + match self { + StorageError::CannotGetLatestFinalizedBlock(e) + | StorageError::CannotFetchValue(e) + | StorageError::CannotIterateValues(e) + | StorageError::StreamFailure(e) => Some(e), + _ => None, + } + } +} + +/// Something went wrong working with a constant. 
+#[derive(Debug, DeriveError)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum ConstantError { + #[error("The static constant address used is not compatible with the live chain")] + IncompatibleCodegen, + #[error("Can't find constant: pallet with name {0} not found")] + PalletNameNotFound(String), + #[error( + "Constant '{constant_name}' not found in pallet {pallet_name} in the live chain metadata" + )] + ConstantNameNotFound { + pallet_name: String, + constant_name: String, + }, + #[error("Failed to decode constant: {0}")] + CouldNotDecodeConstant(frame_decode::constants::ConstantDecodeError), + #[error("Cannot obtain constant information from metadata: {0}")] + ConstantInfoError(frame_decode::constants::ConstantInfoError<'static>), +} + +#[derive(Debug, DeriveError)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum StorageKeyError { + #[error("Can't decode the storage key: {error}")] + StorageKeyDecodeError { + bytes: Vec, + error: frame_decode::storage::StorageKeyDecodeError, + }, + #[error("Can't decode the values from the storage key: {0}")] + CannotDecodeValuesInKey(frame_decode::storage::StorageKeyValueDecodeError), + #[error( + "Cannot decode storage key: there were leftover bytes, indicating that the decoding failed" + )] + LeftoverBytes { bytes: Vec }, + #[error("Can't decode a single value from the storage key part at index {index}: {error}")] + CannotDecodeValueInKey { + index: usize, + error: scale_decode::Error, + }, +} + +#[derive(Debug, DeriveError)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum StorageValueError { + #[error("Cannot decode storage value: {0}")] + CannotDecode(frame_decode::storage::StorageValueDecodeError), + #[error( + "Cannot decode storage value: there were leftover bytes, indicating that the decoding failed" + )] + LeftoverBytes { bytes: Vec }, +} + +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +#[error("Cannot decode extrinsic at index {extrinsic_index}: {error}")] +pub struct ExtrinsicDecodeErrorAt { + pub extrinsic_index: usize, + pub error: ExtrinsicDecodeErrorAtReason, +} + +#[derive(Debug, thiserror::Error)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum ExtrinsicDecodeErrorAtReason { + #[error("{0}")] + DecodeError(frame_decode::extrinsics::ExtrinsicDecodeError), + #[error("Leftover bytes")] + LeftoverBytes(Vec), +} + +/// An error that can be emitted when trying to construct an instance of [`crate::config::ExtrinsicParams`], +/// encode data from the instance, or match on signed extensions. +#[derive(Debug, DeriveError)] +#[non_exhaustive] +#[allow(missing_docs)] +pub enum ExtrinsicParamsError { + #[error("Cannot find type id '{type_id} in the metadata (context: {context})")] + MissingTypeId { + /// Type ID. + type_id: u32, + /// Some arbitrary context to help narrow the source of the error. + context: &'static str, + }, + #[error("The chain expects a signed extension with the name {0}, but we did not provide one")] + UnknownTransactionExtension(String), + #[error("Error constructing extrinsic parameters: {0}")] + Custom(Box), +} + +impl ExtrinsicParamsError { + /// Create a custom [`ExtrinsicParamsError`] from a string. 
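+    ///
+    /// A minimal sketch:
+    ///
+    /// ```rust,ignore
+    /// // Surface a bespoke failure from a custom `ExtrinsicParams` implementation:
+    /// let err = ExtrinsicParamsError::custom("block info was not available");
+    /// ```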
+ pub fn custom>(error: S) -> Self { + let error: String = error.into(); + let error: Box = Box::from(error); + ExtrinsicParamsError::Custom(error) + } +} + +impl From for ExtrinsicParamsError { + fn from(value: core::convert::Infallible) -> Self { + match value {} + } +} diff --git a/new/src/error/dispatch_error.rs b/new/src/error/dispatch_error.rs new file mode 100644 index 0000000000..c7e9900321 --- /dev/null +++ b/new/src/error/dispatch_error.rs @@ -0,0 +1,358 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! A representation of the dispatch error; an error returned when +//! something fails in trying to submit/execute a transaction. + +use super::{DispatchErrorDecodeError, ModuleErrorDecodeError, ModuleErrorDetailsError}; +use subxt_metadata::Metadata; +use core::fmt::Debug; +use scale_decode::{DecodeAsType, TypeResolver, visitor::DecodeAsTypeResult}; +use std::{borrow::Cow, marker::PhantomData}; + +/// An error dispatching a transaction. +#[derive(Debug, thiserror::Error, PartialEq, Eq)] +#[allow(clippy::large_enum_variant)] +#[non_exhaustive] +pub enum DispatchError { + /// Some error occurred. + #[error("Some unknown error occurred.")] + Other, + /// Failed to lookup some data. + #[error("Failed to lookup some data.")] + CannotLookup, + /// A bad origin. + #[error("Bad origin.")] + BadOrigin, + /// A custom error in a module. + #[error("Pallet error: {0}")] + Module(ModuleError), + /// At least one consumer is remaining so the account cannot be destroyed. + #[error("At least one consumer is remaining so the account cannot be destroyed.")] + ConsumerRemaining, + /// There are no providers so the account cannot be created. + #[error("There are no providers so the account cannot be created.")] + NoProviders, + /// There are too many consumers so the account cannot be created. + #[error("There are too many consumers so the account cannot be created.")] + TooManyConsumers, + /// An error to do with tokens. + #[error("Token error: {0}")] + Token(TokenError), + /// An arithmetic error. + #[error("Arithmetic error: {0}")] + Arithmetic(ArithmeticError), + /// The number of transactional layers has been reached, or we are not in a transactional layer. + #[error("Transactional error: {0}")] + Transactional(TransactionalError), + /// Resources exhausted, e.g. attempt to read/write data which is too large to manipulate. + #[error( + "Resources exhausted, e.g. attempt to read/write data which is too large to manipulate." + )] + Exhausted, + /// The state is corrupt; this is generally not going to fix itself. + #[error("The state is corrupt; this is generally not going to fix itself.")] + Corruption, + /// Some resource (e.g. a preimage) is unavailable right now. This might fix itself later. + #[error( + "Some resource (e.g. a preimage) is unavailable right now. This might fix itself later." + )] + Unavailable, + /// Root origin is not allowed. + #[error("Root origin is not allowed.")] + RootNotAllowed, +} + +/// An error relating to tokens when dispatching a transaction. +#[derive(scale_decode::DecodeAsType, Debug, thiserror::Error, PartialEq, Eq)] +#[non_exhaustive] +pub enum TokenError { + /// Funds are unavailable. + #[error("Funds are unavailable.")] + FundsUnavailable, + /// Some part of the balance gives the only provider reference to the account and thus cannot be (re)moved. 
+ #[error( + "Some part of the balance gives the only provider reference to the account and thus cannot be (re)moved." + )] + OnlyProvider, + /// Account cannot exist with the funds that would be given. + #[error("Account cannot exist with the funds that would be given.")] + BelowMinimum, + /// Account cannot be created. + #[error("Account cannot be created.")] + CannotCreate, + /// The asset in question is unknown. + #[error("The asset in question is unknown.")] + UnknownAsset, + /// Funds exist but are frozen. + #[error("Funds exist but are frozen.")] + Frozen, + /// Operation is not supported by the asset. + #[error("Operation is not supported by the asset.")] + Unsupported, + /// Account cannot be created for a held balance. + #[error("Account cannot be created for a held balance.")] + CannotCreateHold, + /// Withdrawal would cause unwanted loss of account. + #[error("Withdrawal would cause unwanted loss of account.")] + NotExpendable, + /// Account cannot receive the assets. + #[error("Account cannot receive the assets.")] + Blocked, +} + +/// An error relating to arithmetic when dispatching a transaction. +#[derive(scale_decode::DecodeAsType, Debug, thiserror::Error, PartialEq, Eq)] +#[non_exhaustive] +pub enum ArithmeticError { + /// Underflow. + #[error("Underflow.")] + Underflow, + /// Overflow. + #[error("Overflow.")] + Overflow, + /// Division by zero. + #[error("Division by zero.")] + DivisionByZero, +} + +/// An error relating to the transactional layers when dispatching a transaction. +#[derive(scale_decode::DecodeAsType, Debug, thiserror::Error, PartialEq, Eq)] +#[non_exhaustive] +pub enum TransactionalError { + /// Too many transactional layers have been spawned. + #[error("Too many transactional layers have been spawned.")] + LimitReached, + /// A transactional layer was expected, but does not exist. + #[error("A transactional layer was expected, but does not exist.")] + NoLayer, +} + +/// Details about a module error that has occurred. +#[derive(Clone, thiserror::Error)] +#[non_exhaustive] +pub struct ModuleError { + metadata: Metadata, + /// Bytes representation: + /// - `bytes[0]`: pallet index + /// - `bytes[1]`: error index + /// - `bytes[2..]`: 3 bytes specific for the module error + bytes: [u8; 5], +} + +impl PartialEq for ModuleError { + fn eq(&self, other: &Self) -> bool { + // A module error is the same if the raw underlying details are the same. + self.bytes == other.bytes + } +} + +impl Eq for ModuleError {} + +/// Custom `Debug` implementation, ignores the very large `metadata` field, using it instead (as +/// intended) to resolve the actual pallet and error names. This is much more useful for debugging. +impl Debug for ModuleError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let details = self.details_string(); + write!(f, "ModuleError(<{details}>)") + } +} + +impl std::fmt::Display for ModuleError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let details = self.details_string(); + write!(f, "{details}") + } +} + +impl ModuleError { + /// Return more details about this error. 
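+    ///
+    /// Illustrative usage, given a `ModuleError` taken from a `DispatchError::Module(..)`:
+    ///
+    /// ```rust,ignore
+    /// match module_error.details() {
+    ///     Ok(details) => println!(
+    ///         "failed with {}::{}",
+    ///         details.pallet.name(),
+    ///         details.variant.name,
+    ///     ),
+    ///     Err(_) => println!("unknown module error bytes: {:?}", module_error.bytes()),
+    /// }
+    /// ```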
+ pub fn details(&self) -> Result, ModuleErrorDetailsError> { + let pallet = self + .metadata + .pallet_by_error_index(self.pallet_index()) + .ok_or(ModuleErrorDetailsError::PalletNotFound { + pallet_index: self.pallet_index(), + })?; + + let variant = pallet + .error_variant_by_index(self.error_index()) + .ok_or_else(|| ModuleErrorDetailsError::ErrorVariantNotFound { + pallet_name: pallet.name().into(), + error_index: self.error_index(), + })?; + + Ok(ModuleErrorDetails { pallet, variant }) + } + + /// Return a formatted string of the resolved error details for debugging/display purposes. + pub fn details_string(&self) -> String { + match self.details() { + Ok(details) => format!( + "{pallet_name}::{variant_name}", + pallet_name = details.pallet.name(), + variant_name = details.variant.name, + ), + Err(_) => format!( + "Unknown pallet error '{bytes:?}' (pallet and error details cannot be retrieved)", + bytes = self.bytes + ), + } + } + + /// Return the underlying module error data that was decoded. + pub fn bytes(&self) -> [u8; 5] { + self.bytes + } + + /// Obtain the pallet index from the underlying byte data. + pub fn pallet_index(&self) -> u8 { + self.bytes[0] + } + + /// Obtain the error index from the underlying byte data. + pub fn error_index(&self) -> u8 { + self.bytes[1] + } + + /// Attempts to decode the ModuleError into the top outer Error enum. + pub fn as_root_error(&self) -> Result { + let decoded = E::decode_as_type( + &mut &self.bytes[..], + self.metadata.outer_enums().error_enum_ty(), + self.metadata.types(), + ) + .map_err(ModuleErrorDecodeError)?; + + Ok(decoded) + } +} + +/// Details about the module error. +pub struct ModuleErrorDetails<'a> { + /// The pallet that the error is in + pub pallet: subxt_metadata::PalletMetadata<'a>, + /// The variant representing the error + pub variant: &'a scale_info::Variant, +} + +impl DispatchError { + /// Attempt to decode a runtime [`DispatchError`]. + #[doc(hidden)] + pub fn decode_from<'a>( + bytes: impl Into>, + metadata: Metadata, + ) -> Result { + let bytes = bytes.into(); + let dispatch_error_ty_id = metadata + .dispatch_error_ty() + .ok_or(DispatchErrorDecodeError::DispatchErrorTypeIdNotFound)?; + + // The aim is to decode our bytes into roughly this shape. This is copied from + // `sp_runtime::DispatchError`; we need the variant names and any inner variant + // names/shapes to line up in order for decoding to be successful. + #[derive(scale_decode::DecodeAsType)] + enum DecodedDispatchError { + Other, + CannotLookup, + BadOrigin, + Module(DecodedModuleErrorBytes), + ConsumerRemaining, + NoProviders, + TooManyConsumers, + Token(TokenError), + Arithmetic(ArithmeticError), + Transactional(TransactionalError), + Exhausted, + Corruption, + Unavailable, + RootNotAllowed, + } + + // ModuleError is a bit special; we want to support being decoded from either + // a legacy format of 2 bytes, or a newer format of 5 bytes. So, just grab the bytes + // out when decoding to manually work with them. 
+ struct DecodedModuleErrorBytes(Vec); + struct DecodedModuleErrorBytesVisitor(PhantomData); + impl scale_decode::Visitor for DecodedModuleErrorBytesVisitor { + type Error = scale_decode::Error; + type Value<'scale, 'info> = DecodedModuleErrorBytes; + type TypeResolver = R; + + fn unchecked_decode_as_type<'scale, 'info>( + self, + input: &mut &'scale [u8], + _type_id: R::TypeId, + _types: &'info R, + ) -> DecodeAsTypeResult, Self::Error>> + { + DecodeAsTypeResult::Decoded(Ok(DecodedModuleErrorBytes(input.to_vec()))) + } + } + + impl scale_decode::IntoVisitor for DecodedModuleErrorBytes { + type AnyVisitor = DecodedModuleErrorBytesVisitor; + fn into_visitor() -> DecodedModuleErrorBytesVisitor { + DecodedModuleErrorBytesVisitor(PhantomData) + } + } + + // Decode into our temporary error: + let decoded_dispatch_err = DecodedDispatchError::decode_as_type( + &mut &*bytes, + dispatch_error_ty_id, + metadata.types(), + ) + .map_err(DispatchErrorDecodeError::CouldNotDecodeDispatchError)?; + + // Convert into the outward-facing error, mainly by handling the Module variant. + let dispatch_error = match decoded_dispatch_err { + // Mostly we don't change anything from our decoded to our outward-facing error: + DecodedDispatchError::Other => DispatchError::Other, + DecodedDispatchError::CannotLookup => DispatchError::CannotLookup, + DecodedDispatchError::BadOrigin => DispatchError::BadOrigin, + DecodedDispatchError::ConsumerRemaining => DispatchError::ConsumerRemaining, + DecodedDispatchError::NoProviders => DispatchError::NoProviders, + DecodedDispatchError::TooManyConsumers => DispatchError::TooManyConsumers, + DecodedDispatchError::Token(val) => DispatchError::Token(val), + DecodedDispatchError::Arithmetic(val) => DispatchError::Arithmetic(val), + DecodedDispatchError::Transactional(val) => DispatchError::Transactional(val), + DecodedDispatchError::Exhausted => DispatchError::Exhausted, + DecodedDispatchError::Corruption => DispatchError::Corruption, + DecodedDispatchError::Unavailable => DispatchError::Unavailable, + DecodedDispatchError::RootNotAllowed => DispatchError::RootNotAllowed, + // But we apply custom logic to transform the module error into the outward facing version: + DecodedDispatchError::Module(module_bytes) => { + let module_bytes = module_bytes.0; + + // The old version is 2 bytes; a pallet and error index. + // The new version is 5 bytes; a pallet and error index and then 3 extra bytes. + let bytes = if module_bytes.len() == 2 { + [module_bytes[0], module_bytes[1], 0, 0, 0] + } else if module_bytes.len() == 5 { + [ + module_bytes[0], + module_bytes[1], + module_bytes[2], + module_bytes[3], + module_bytes[4], + ] + } else { + tracing::warn!( + "Can't decode error sp_runtime::DispatchError: bytes do not match known shapes" + ); + // Return _all_ of the bytes; every "unknown" return should be consistent. + return Err(DispatchErrorDecodeError::CouldNotDecodeModuleError { + bytes: bytes.to_vec(), + }); + }; + + // And return our outward-facing version: + DispatchError::Module(ModuleError { metadata, bytes }) + } + }; + + Ok(dispatch_error) + } +} diff --git a/new/src/error/hex.rs b/new/src/error/hex.rs new file mode 100644 index 0000000000..01d67a998e --- /dev/null +++ b/new/src/error/hex.rs @@ -0,0 +1,15 @@ +/// Display hex strings. 
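+///
+/// Anything byte-like can be converted into one; a small illustrative example:
+///
+/// ```rust,ignore
+/// let h = Hex::from([0xde, 0xad, 0xbe, 0xef]);
+/// assert_eq!(h.to_string(), "deadbeef");
+/// ```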
+#[derive(PartialEq, Eq, Clone, Debug, PartialOrd, Ord)] +pub struct Hex(String); + +impl> From for Hex { + fn from(value: T) -> Self { + Hex(hex::encode(value.as_ref())) + } +} + +impl std::fmt::Display for Hex { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.0.fmt(f) + } +} diff --git a/new/src/lib.rs b/new/src/lib.rs new file mode 100644 index 0000000000..96377e526b --- /dev/null +++ b/new/src/lib.rs @@ -0,0 +1,346 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +// TODO: REMOVE BEFORE MERGING. +#![allow(missing_docs)] + +//! Subxt is a library for interacting with Substrate based nodes. Using it looks something like this: +//! +//! ```rust,ignore +#![doc = include_str!("../examples/tx_basic.rs")] +//! ``` +//! +//! Take a look at [the Subxt guide](book) to learn more about how to use Subxt. + +#[cfg(any( + all(feature = "web", feature = "native"), + not(any(feature = "web", feature = "native")) +))] +compile_error!("subxt: exactly one of the 'web' and 'native' features should be used."); + +// Suppress an unused dependency warning because these are +// only used in example code snippets at the time of writing. +#[cfg(test)] +mod only_used_in_docs_or_tests { + use subxt_signer as _; + use tokio as _; + use tracing_subscriber as _; +} + +// // Internal helper macros +// #[macro_use] +// mod macros; + +pub mod config; +pub mod client; +pub mod error; +pub mod utils; +// pub mod book; +// pub mod backend; +// pub mod blocks; +// pub mod constants; +// pub mod custom_values; +// pub mod events; +// pub mod runtime_api; +// pub mod storage; +// pub mod tx; +// pub mod view_functions; + +// /// This module provides a [`Config`] type, which is used to define various +// /// types that are important in order to speak to a particular chain. +// /// [`SubstrateConfig`] provides a default set of these types suitable for the +// /// default Substrate node implementation, and [`PolkadotConfig`] for a +// /// Polkadot node. +// pub mod config { +// pub use subxt_core::config::{ +// Config, DefaultExtrinsicParams, DefaultExtrinsicParamsBuilder, ExtrinsicParams, +// ExtrinsicParamsEncoder, Hash, HashFor, Hasher, Header, PolkadotConfig, +// PolkadotExtrinsicParams, SubstrateConfig, SubstrateExtrinsicParams, TransactionExtension, +// polkadot, substrate, transaction_extensions, +// }; +// pub use subxt_core::error::ExtrinsicParamsError; +// } + +// /// Types representing the metadata obtained from a node. +// pub mod metadata { +// pub use subxt_metadata::*; +// } + +// /// Submit dynamic transactions. +// pub mod dynamic { +// pub use subxt_core::dynamic::*; +// } + +// // Expose light client bits +// cfg_unstable_light_client! { +// pub use subxt_lightclient as lightclient; +// } + +// // Expose a few of the most common types at root, +// // but leave most types behind their respective modules. +// pub use crate::{ +// client::{OfflineClient, OnlineClient}, +// config::{Config, PolkadotConfig, SubstrateConfig}, +// error::Error, +// metadata::Metadata, +// }; + +/// Re-export external crates that are made use of in the subxt API. 
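+///
+/// For example, the SCALE codec traits can be reached without adding a separate
+/// dependency (the path shown is illustrative):
+///
+/// ```rust,ignore
+/// use subxt::ext::codec::{Decode, Encode};
+/// ```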
+pub mod ext { + pub use codec; + pub use frame_metadata; + pub use futures; + pub use scale_bits; + pub use scale_decode; + pub use scale_encode; + pub use scale_value; + pub use subxt_rpcs; + + #[cfg(feature = "jsonrpsee")] + pub use jsonrpsee; +} + +/// Generate a strongly typed API for interacting with a Substrate runtime from its metadata or WASM. +/// +/// # Metadata +/// +/// First, you'll need to get hold of some metadata for the node you'd like to interact with. One +/// way to do this is by using the `subxt` CLI tool: +/// +/// ```bash +/// # Install the CLI tool: +/// cargo install subxt-cli +/// # Use it to download metadata (in this case, from a node running locally) +/// subxt metadata > polkadot_metadata.scale +/// ``` +/// +/// Run `subxt metadata --help` for more options. +/// +/// # Basic usage +/// +/// We can generate an interface to a chain given either: +/// - A locally saved SCALE encoded metadata file (see above) for that chain, +/// - The Runtime WASM for that chain, or +/// - A URL pointing at the JSON-RPC interface for a node on that chain. +/// +/// In each case, the `subxt` macro will use this data to populate the annotated module with all of the methods +/// and types required for interacting with the chain that the Runtime/metadata was loaded from. +/// +/// Let's look at each of these: +/// +/// ## Using a locally saved metadata file +/// +/// Annotate a Rust module with the `subxt` attribute referencing a metadata file like so: +/// +/// ```rust,no_run,standalone_crate +/// #[subxt::subxt( +/// runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale", +/// )] +/// mod polkadot {} +/// ``` +/// +/// ## Using a WASM runtime via `runtime_path = "..."` +/// +/// This requires the `runtime-wasm-path` feature flag. +/// +/// Annotate a Rust module with the `subxt` attribute referencing some runtime WASM like so: +/// +/// ```rust,ignore +/// #[subxt::subxt( +/// runtime_path = "../artifacts/westend_runtime.wasm", +/// )] +/// mod polkadot {} +/// ``` +/// +/// ## Connecting to a node to download metadata via `runtime_metadata_insecure_url = "..."` +/// +/// This will, at compile time, connect to the JSON-RPC interface for some node at the URL given, +/// download the metadata from it, and use that. This can be useful in CI, but is **not recommended** +/// in production code, because: +/// +/// - The compilation time is increased since we have to download metadata from a URL each time. If +/// the node we connect to is unresponsive, this will be slow or could fail. +/// - The metadata may change from what is expected without notice, causing compilation to fail if +/// it leads to changes in the generated interfaces that are being used. +/// - The node that you connect to could be malicious and provide incorrect metadata for the chain.
+/// +/// ```rust,ignore +/// #[subxt::subxt( +/// runtime_metadata_insecure_url = "wss://rpc.polkadot.io:443" +/// )] +/// mod polkadot {} +/// ``` +/// +/// # Configuration +/// +/// This macro supports a number of attributes to configure what is generated: +/// +/// ## `crate = "..."` +/// +/// Use this attribute to specify a custom path to the `subxt_core` crate: +/// +/// ```rust,standalone_crate +/// # pub extern crate subxt_core; +/// # pub mod path { pub mod to { pub use subxt_core; } } +/// # fn main() {} +/// #[subxt::subxt( +/// runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale", +/// crate = "crate::path::to::subxt_core" +/// )] +/// mod polkadot {} +/// ``` +/// +/// This is useful if you write a library which uses this macro, but don't want to force users to depend on `subxt` +/// at the top level too. By default the path `::subxt` is used. +/// +/// ## `substitute_type(path = "...", with = "...")` +/// +/// This attribute replaces any reference to the generated type at the path given by `path` with a +/// reference to the path given by `with`. +/// +/// ```rust,standalone_crate +/// #[subxt::subxt( +/// runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale", +/// substitute_type(path = "sp_arithmetic::per_things::Perbill", with = "crate::Foo") +/// )] +/// mod polkadot {} +/// +/// # #[derive( +/// # scale_encode::EncodeAsType, +/// # scale_decode::DecodeAsType, +/// # codec::Encode, +/// # codec::Decode, +/// # Clone, +/// # Debug, +/// # )] +/// // In reality this needs some traits implementing on +/// // it to allow it to be used in place of Perbill: +/// pub struct Foo(u32); +/// # impl codec::CompactAs for Foo { +/// # type As = u32; +/// # fn encode_as(&self) -> &Self::As { +/// # &self.0 +/// # } +/// # fn decode_from(x: Self::As) -> Result { +/// # Ok(Foo(x)) +/// # } +/// # } +/// # impl From> for Foo { +/// # fn from(v: codec::Compact) -> Foo { +/// # v.0 +/// # } +/// # } +/// # fn main() {} +/// ``` +/// +/// If the type you're substituting contains generic parameters, you can "pattern match" on those, and +/// make use of them in the substituted type, like so: +/// +/// ```rust,no_run,standalone_crate +/// #[subxt::subxt( +/// runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale", +/// substitute_type( +/// path = "sp_runtime::multiaddress::MultiAddress", +/// with = "::subxt::utils::Static>" +/// ) +/// )] +/// mod polkadot {} +/// ``` +/// +/// The above is also an example of using the [`crate::utils::Static`] type to wrap some type which doesn't +/// on it's own implement [`scale_encode::EncodeAsType`] or [`scale_decode::DecodeAsType`], which are required traits +/// for any substitute type to implement by default. +/// +/// ## `derive_for_all_types = "..."` +/// +/// By default, all generated types derive a small set of traits. This attribute allows you to derive additional +/// traits on all generated types: +/// +/// ```rust,no_run,standalone_crate +/// #[subxt::subxt( +/// runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale", +/// derive_for_all_types = "Eq, PartialEq" +/// )] +/// mod polkadot {} +/// ``` +/// +/// Any substituted types (including the default substitutes) must also implement these traits in order to avoid errors +/// here. +/// +/// ## `derive_for_type(path = "...", derive = "...")` +/// +/// Unlike the above, which derives some trait on every generated type, this attribute allows you to derive traits only +/// for specific types. 
Note that any types which are used inside the specified type may also need to derive the same traits. +/// +/// ```rust,no_run,standalone_crate +/// #[subxt::subxt( +/// runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale", +/// derive_for_all_types = "Eq, PartialEq", +/// derive_for_type(path = "frame_support::PalletId", derive = "Ord, PartialOrd"), +/// derive_for_type(path = "sp_runtime::ModuleError", derive = "Hash"), +/// )] +/// mod polkadot {} +/// ``` +/// +/// ## `generate_docs` +/// +/// By default, documentation is not generated via the macro, since IDEs do not typically make use of it. This attribute +/// forces documentation to be generated, too. +/// +/// ```rust,no_run,standalone_crate +/// #[subxt::subxt( +/// runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale", +/// generate_docs +/// )] +/// mod polkadot {} +/// ``` +/// +/// ## `runtime_types_only` +/// +/// By default, the macro will generate various interfaces to make using Subxt simpler in addition with any types that need +/// generating to make this possible. This attribute makes the codegen only generate the types and not the Subxt interface. +/// +/// ```rust,no_run,standalone_crate +/// #[subxt::subxt( +/// runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale", +/// runtime_types_only +/// )] +/// mod polkadot {} +/// ``` +/// +/// ## `no_default_derives` +/// +/// By default, the macro will add all derives necessary for the generated code to play nicely with Subxt. Adding this attribute +/// removes all default derives. +/// +/// ```rust,no_run,standalone_crate +/// #[subxt::subxt( +/// runtime_metadata_path = "../artifacts/polkadot_metadata_full.scale", +/// runtime_types_only, +/// no_default_derives, +/// derive_for_all_types="codec::Encode, codec::Decode" +/// )] +/// mod polkadot {} +/// ``` +/// +/// **Note**: At the moment, you must derive at least one of `codec::Encode` or `codec::Decode` or `scale_encode::EncodeAsType` or +/// `scale_decode::DecodeAsType` (because we add `#[codec(..)]` attributes on some fields/types during codegen), and you must use this +/// feature in conjunction with `runtime_types_only` (or manually specify a bunch of defaults to make codegen work properly when +/// generating the subxt interfaces). +/// +/// ## `unstable_metadata` +/// +/// This attribute works only in combination with `runtime_metadata_insecure_url`. By default, the macro will fetch the latest stable +/// version of the metadata from the target node. This attribute makes the codegen attempt to fetch the unstable version of +/// the metadata first. This is **not recommended** in production code, since the unstable metadata a node is providing is likely +/// to be incompatible with Subxt. +/// +/// ```rust,ignore +/// #[subxt::subxt( +/// runtime_metadata_insecure_url = "wss://rpc.polkadot.io:443", +/// unstable_metadata +/// )] +/// mod polkadot {} +/// ``` +pub use subxt_macro::subxt; diff --git a/new/src/utils.rs b/new/src/utils.rs new file mode 100644 index 0000000000..ef92650ea5 --- /dev/null +++ b/new/src/utils.rs @@ -0,0 +1,79 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Miscellaneous utility helpers. 
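+//!
+//! As a rough illustration of the free helpers defined below (the import path shown is
+//! assumed for the sake of the example), hex-encoding some bytes and stripping a compact
+//! length prefix looks like this:
+//!
+//! ```rust,ignore
+//! use subxt_new::utils::{strip_compact_prefix, to_hex};
+//!
+//! // `to_hex` prefixes the hex-encoded bytes with "0x":
+//! assert_eq!(to_hex([1u8, 2, 3]), "0x010203");
+//!
+//! // `strip_compact_prefix` reads a compact-encoded length from the front of some SCALE
+//! // bytes, returning the length and the remaining bytes:
+//! let bytes = [12u8, 1, 2, 3]; // compact(3) is the single byte 0x0c.
+//! let (len, rest) = strip_compact_prefix(&bytes).unwrap();
+//! assert_eq!(len, 3);
+//! assert_eq!(rest, &[1u8, 2, 3]);
+//! ```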
+ +mod account_id; +mod account_id20; +pub mod bits; +mod era; +mod multi_address; +mod multi_signature; +mod static_type; +mod unchecked_extrinsic; +mod wrapper_opaque; +mod yesnomaybe; +mod range_map; + +use codec::{Compact, Decode, Encode}; +use derive_where::derive_where; + +pub use range_map::{ RangeMap, RangeMapBuilder, RangeMapError }; +pub use account_id::AccountId32; +pub use account_id20::AccountId20; +pub use era::Era; +pub use multi_address::MultiAddress; +pub use multi_signature::MultiSignature; +pub use primitive_types::{H160, H256, H512}; +pub use static_type::Static; +pub use unchecked_extrinsic::UncheckedExtrinsic; +pub use wrapper_opaque::WrapperKeepOpaque; +pub use yesnomaybe::{Maybe, No, NoMaybe, Yes, YesMaybe, YesNo}; + +/// Wraps an already encoded byte vector, prevents being encoded as a raw byte vector as part of +/// the transaction payload +#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] +pub struct Encoded(pub Vec); + +impl codec::Encode for Encoded { + fn encode(&self) -> Vec { + self.0.to_owned() + } +} + +/// Decodes a compact encoded value from the beginning of the provided bytes, +/// returning the value and any remaining bytes. +pub fn strip_compact_prefix(bytes: &[u8]) -> Result<(u64, &[u8]), codec::Error> { + let cursor = &mut &*bytes; + let val = >::decode(cursor)?; + Ok((val.0, *cursor)) +} + +/// A version of [`core::marker::PhantomData`] that is also Send and Sync (which is fine +/// because regardless of the generic param, it is always possible to Send + Sync this +/// 0 size type). +#[derive(Encode, Decode, scale_info::TypeInfo)] +#[derive_where(Clone, PartialEq, Debug, Eq, Default, Hash)] +#[scale_info(skip_type_params(T))] +#[doc(hidden)] +pub struct PhantomDataSendSync(core::marker::PhantomData); + +impl PhantomDataSendSync { + pub fn new() -> Self { + Self(core::marker::PhantomData) + } +} + +unsafe impl Send for PhantomDataSendSync {} +unsafe impl Sync for PhantomDataSendSync {} + +/// This represents a key-value collection and is SCALE compatible +/// with collections like BTreeMap. This has the same type params +/// as `BTreeMap` which allows us to easily swap the two during codegen. +pub type KeyedVec = Vec<(K, V)>; + +/// A quick helper to encode some bytes to hex. +pub fn to_hex(bytes: impl AsRef<[u8]>) -> String { + format!("0x{}", hex::encode(bytes.as_ref())) +} diff --git a/new/src/utils/account_id.rs b/new/src/utils/account_id.rs new file mode 100644 index 0000000000..f459ae511d --- /dev/null +++ b/new/src/utils/account_id.rs @@ -0,0 +1,188 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! The "default" Substrate/Polkadot AccountId. This is used in codegen, as well as signing related bits. +//! This doesn't contain much functionality itself, but is easy to convert to/from an `sp_core::AccountId32` +//! for instance, to gain functionality without forcing a dependency on Substrate crates here. + +use codec::{Decode, Encode}; +use serde::{Deserialize, Serialize}; +use thiserror::Error as DeriveError; + +/// A 32-byte cryptographic identifier. This is a simplified version of Substrate's +/// `sp_core::crypto::AccountId32`. To obtain more functionality, convert this into +/// that type. 
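+///
+/// As a rough usage sketch (the address shown is the well-known "Alice" dev account and is
+/// purely illustrative), an `AccountId32` can be parsed from and displayed as an SS58 string:
+///
+/// ```rust,ignore
+/// use core::str::FromStr;
+///
+/// let alice = AccountId32::from_str("5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY").unwrap();
+/// // Display re-encodes using the generic Substrate SS58 prefix (42), so this round-trips:
+/// assert_eq!(alice.to_string(), "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY");
+/// ```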
+#[derive( + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + Debug, + scale_encode::EncodeAsType, + scale_decode::DecodeAsType, + scale_info::TypeInfo, +)] +pub struct AccountId32(pub [u8; 32]); + +impl AsRef<[u8]> for AccountId32 { + fn as_ref(&self) -> &[u8] { + &self.0[..] + } +} + +impl AsRef<[u8; 32]> for AccountId32 { + fn as_ref(&self) -> &[u8; 32] { + &self.0 + } +} + +impl From<[u8; 32]> for AccountId32 { + fn from(x: [u8; 32]) -> Self { + AccountId32(x) + } +} + +impl AccountId32 { + // Return the ss58-check string for this key. Adapted from `sp_core::crypto`. We need this to + // serialize our account appropriately but otherwise don't care. + fn to_ss58check(&self) -> String { + // For serializing to a string to obtain the account nonce, we use the default substrate + // prefix (since we have no way to otherwise pick one). It doesn't really matter, since when + // it's deserialized back in system_accountNextIndex, we ignore this (so long as it's valid). + const SUBSTRATE_SS58_PREFIX: u8 = 42; + // prefix <= 63 just take up one byte at the start: + let mut v = vec![SUBSTRATE_SS58_PREFIX]; + // then push the account ID bytes. + v.extend(self.0); + // then push a 2 byte checksum of what we have so far. + let r = ss58hash(&v); + v.extend(&r[0..2]); + // then encode to base58. + use base58::ToBase58; + v.to_base58() + } + + // This isn't strictly needed, but to give our AccountId32 a little more usefulness, we also + // implement the logic needed to decode an AccountId32 from an SS58 encoded string. This is exposed + // via a `FromStr` impl. + fn from_ss58check(s: &str) -> Result { + const CHECKSUM_LEN: usize = 2; + let body_len = 32; + + use base58::FromBase58; + let data = s.from_base58().map_err(|_| FromSs58Error::BadBase58)?; + if data.len() < 2 { + return Err(FromSs58Error::BadLength); + } + let prefix_len = match data[0] { + 0..=63 => 1, + 64..=127 => 2, + _ => return Err(FromSs58Error::InvalidPrefix), + }; + if data.len() != prefix_len + body_len + CHECKSUM_LEN { + return Err(FromSs58Error::BadLength); + } + let hash = ss58hash(&data[0..body_len + prefix_len]); + let checksum = &hash[0..CHECKSUM_LEN]; + if data[body_len + prefix_len..body_len + prefix_len + CHECKSUM_LEN] != *checksum { + // Invalid checksum. 
+ return Err(FromSs58Error::InvalidChecksum); + } + + let result = data[prefix_len..body_len + prefix_len] + .try_into() + .map_err(|_| FromSs58Error::BadLength)?; + Ok(AccountId32(result)) + } +} + +/// An error obtained from trying to interpret an SS58 encoded string into an AccountId32 +#[derive(Clone, Copy, Eq, PartialEq, Debug, DeriveError)] +#[allow(missing_docs)] +pub enum FromSs58Error { + #[error("Base 58 requirement is violated")] + BadBase58, + #[error("Length is bad")] + BadLength, + #[error("Invalid checksum")] + InvalidChecksum, + #[error("Invalid SS58 prefix byte.")] + InvalidPrefix, +} + +// We do this just to get a checksum to help verify the validity of the address in to_ss58check +fn ss58hash(data: &[u8]) -> Vec { + use blake2::{Blake2b512, Digest}; + const PREFIX: &[u8] = b"SS58PRE"; + let mut ctx = Blake2b512::new(); + ctx.update(PREFIX); + ctx.update(data); + ctx.finalize().to_vec() +} + +impl Serialize for AccountId32 { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.serialize_str(&self.to_ss58check()) + } +} + +impl<'de> Deserialize<'de> for AccountId32 { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + AccountId32::from_ss58check(&String::deserialize(deserializer)?) + .map_err(|e| serde::de::Error::custom(format!("{e:?}"))) + } +} + +impl core::fmt::Display for AccountId32 { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "{}", self.to_ss58check()) + } +} + +impl core::str::FromStr for AccountId32 { + type Err = FromSs58Error; + fn from_str(s: &str) -> Result { + AccountId32::from_ss58check(s) + } +} + +#[cfg(test)] +mod test { + use super::*; + use sp_core::{self, crypto::Ss58Codec}; + use sp_keyring::sr25519::Keyring; + + #[test] + fn ss58_is_compatible_with_substrate_impl() { + let keyrings = vec![Keyring::Alice, Keyring::Bob, Keyring::Charlie]; + + for keyring in keyrings { + let substrate_account = keyring.to_account_id(); + let local_account = AccountId32(substrate_account.clone().into()); + + // Both should encode to ss58 the same way: + let substrate_ss58 = substrate_account.to_ss58check(); + assert_eq!(substrate_ss58, local_account.to_ss58check()); + + // Both should decode from ss58 back to the same: + assert_eq!( + sp_core::crypto::AccountId32::from_ss58check(&substrate_ss58).unwrap(), + substrate_account + ); + assert_eq!( + AccountId32::from_ss58check(&substrate_ss58).unwrap(), + local_account + ); + } + } +} diff --git a/new/src/utils/account_id20.rs b/new/src/utils/account_id20.rs new file mode 100644 index 0000000000..5eb2f815b0 --- /dev/null +++ b/new/src/utils/account_id20.rs @@ -0,0 +1,150 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! `AccountId20` is a representation of Ethereum address derived from hashing the public key. + +use codec::{Decode, Encode}; +use keccak_hash::keccak; +use serde::{Deserialize, Serialize}; +use thiserror::Error as DeriveError; + +#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + Debug, + scale_encode::EncodeAsType, + scale_decode::DecodeAsType, + scale_info::TypeInfo, +)] +/// Ethereum-compatible `AccountId`. +pub struct AccountId20(pub [u8; 20]); + +impl AsRef<[u8]> for AccountId20 { + fn as_ref(&self) -> &[u8] { + &self.0[..] 
+ } +} + +impl AsRef<[u8; 20]> for AccountId20 { + fn as_ref(&self) -> &[u8; 20] { + &self.0 + } +} + +impl From<[u8; 20]> for AccountId20 { + fn from(x: [u8; 20]) -> Self { + AccountId20(x) + } +} + +impl AccountId20 { + /// Convert to a public key hash + pub fn checksum(&self) -> String { + let hex_address = hex::encode(self.0); + let hash = keccak(hex_address.as_bytes()); + + let mut checksum_address = String::with_capacity(42); + checksum_address.push_str("0x"); + + for (i, ch) in hex_address.chars().enumerate() { + // Get the corresponding nibble from the hash + let nibble = (hash[i / 2] >> (if i % 2 == 0 { 4 } else { 0 })) & 0xf; + + if nibble >= 8 { + checksum_address.push(ch.to_ascii_uppercase()); + } else { + checksum_address.push(ch); + } + } + + checksum_address + } +} + +/// An error obtained from trying to interpret a hex encoded string into an AccountId20 +#[derive(Clone, Copy, Eq, PartialEq, Debug, DeriveError)] +#[allow(missing_docs)] +pub enum FromChecksumError { + #[error("Length is bad")] + BadLength, + #[error("Invalid checksum")] + InvalidChecksum, + #[error("Invalid checksum prefix byte.")] + InvalidPrefix, +} + +impl Serialize for AccountId20 { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.serialize_str(&self.checksum()) + } +} + +impl<'de> Deserialize<'de> for AccountId20 { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + String::deserialize(deserializer)? + .parse::() + .map_err(|e| serde::de::Error::custom(format!("{e:?}"))) + } +} + +impl core::fmt::Display for AccountId20 { + fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result { + write!(f, "{}", self.checksum()) + } +} + +impl core::str::FromStr for AccountId20 { + type Err = FromChecksumError; + fn from_str(s: &str) -> Result { + if s.len() != 42 { + return Err(FromChecksumError::BadLength); + } + if !s.starts_with("0x") { + return Err(FromChecksumError::InvalidPrefix); + } + hex::decode(&s.as_bytes()[2..]) + .map_err(|_| FromChecksumError::InvalidChecksum)? + .try_into() + .map(AccountId20) + .map_err(|_| FromChecksumError::BadLength) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn deserialisation() { + let key_hashes = vec![ + "0xf24FF3a9CF04c71Dbc94D0b566f7A27B94566cac", + "0x3Cd0A705a2DC65e5b1E1205896BaA2be8A07c6e0", + "0x798d4Ba9baf0064Ec19eB4F0a1a45785ae9D6DFc", + "0x773539d4Ac0e786233D90A233654ccEE26a613D9", + "0xFf64d3F6efE2317EE2807d223a0Bdc4c0c49dfDB", + "0xC0F0f4ab324C46e55D02D0033343B4Be8A55532d", + ]; + + for key_hash in key_hashes { + let parsed: AccountId20 = key_hash.parse().expect("Failed to parse"); + + let encoded = parsed.checksum(); + + // `encoded` should be equal to the initial key_hash + assert_eq!(encoded, key_hash); + } + } +} diff --git a/new/src/utils/bits.rs b/new/src/utils/bits.rs new file mode 100644 index 0000000000..5b5ba9226c --- /dev/null +++ b/new/src/utils/bits.rs @@ -0,0 +1,264 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! Generic `scale_bits` over `bitvec`-like `BitOrder` and `BitFormat` types. + +use codec::{Compact, Input}; +use core::marker::PhantomData; +use scale_bits::{ + Bits, + scale::format::{Format, OrderFormat, StoreFormat}, +}; +use scale_decode::{IntoVisitor, TypeResolver}; + +/// Associates `bitvec::store::BitStore` trait with corresponding, type-erased `scale_bits::StoreFormat` enum. 
+/// +/// Used to decode bit sequences by providing `scale_bits::StoreFormat` using +/// `bitvec`-like type type parameters. +pub trait BitStore { + /// Corresponding `scale_bits::StoreFormat` value. + const FORMAT: StoreFormat; + /// Number of bits that the backing store types holds. + const BITS: u32; +} +macro_rules! impl_store { + ($ty:ident, $wrapped:ty) => { + impl BitStore for $wrapped { + const FORMAT: StoreFormat = StoreFormat::$ty; + const BITS: u32 = <$wrapped>::BITS; + } + }; +} +impl_store!(U8, u8); +impl_store!(U16, u16); +impl_store!(U32, u32); +impl_store!(U64, u64); + +/// Associates `bitvec::order::BitOrder` trait with corresponding, type-erased `scale_bits::OrderFormat` enum. +/// +/// Used to decode bit sequences in runtime by providing `scale_bits::OrderFormat` using +/// `bitvec`-like type type parameters. +pub trait BitOrder { + /// Corresponding `scale_bits::OrderFormat` value. + const FORMAT: OrderFormat; +} +macro_rules! impl_order { + ($ty:ident) => { + #[doc = concat!("Type-level value that corresponds to `scale_bits::OrderFormat::", stringify!($ty), "` at run-time")] + #[doc = concat!(" and `bitvec::order::BitOrder::", stringify!($ty), "` at the type level.")] + #[derive(Clone, Debug, PartialEq, Eq)] + pub enum $ty {} + impl BitOrder for $ty { + const FORMAT: OrderFormat = OrderFormat::$ty; + } + }; +} +impl_order!(Lsb0); +impl_order!(Msb0); + +/// Constructs a run-time format parameters based on the corresponding type-level parameters. +fn bit_format() -> Format { + Format { + order: Order::FORMAT, + store: Store::FORMAT, + } +} + +/// `scale_bits::Bits` generic over the bit store (`u8`/`u16`/`u32`/`u64`) and bit order (LSB, MSB) +/// used for SCALE encoding/decoding. Uses `scale_bits::Bits`-default `u8` and LSB format underneath. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct DecodedBits { + bits: Bits, + _marker: PhantomData<(Store, Order)>, +} + +impl DecodedBits { + /// Extracts the underlying `scale_bits::Bits` value. + pub fn into_bits(self) -> Bits { + self.bits + } + + /// References the underlying `scale_bits::Bits` value. + pub fn as_bits(&self) -> &Bits { + &self.bits + } +} + +impl core::iter::FromIterator for DecodedBits { + fn from_iter>(iter: T) -> Self { + DecodedBits { + bits: Bits::from_iter(iter), + _marker: PhantomData, + } + } +} + +impl codec::Decode for DecodedBits { + fn decode(input: &mut I) -> Result { + /// Equivalent of `BitSlice::MAX_BITS` on 32bit machine. + const ARCH32BIT_BITSLICE_MAX_BITS: u32 = 0x1fff_ffff; + + let Compact(bits) = >::decode(input)?; + // Otherwise it is impossible to store it on 32bit machine. + if bits > ARCH32BIT_BITSLICE_MAX_BITS { + return Err("Attempt to decode a BitVec with too many bits".into()); + } + // NOTE: Replace with `bits.div_ceil(Store::BITS)` if `int_roundings` is stabilised + let elements = (bits / Store::BITS) + u32::from(bits % Store::BITS != 0); + let bytes_in_elem = Store::BITS.saturating_div(u8::BITS); + let bytes_needed = (elements * bytes_in_elem) as usize; + + // NOTE: We could reduce allocations if it would be possible to directly + // decode from an `Input` type using a custom format (rather than default ) + // for the `Bits` type. 
+ let mut storage = codec::Encode::encode(&Compact(bits)); + let prefix_len = storage.len(); + storage.reserve_exact(bytes_needed); + storage.extend(vec![0; bytes_needed]); + input.read(&mut storage[prefix_len..])?; + + let decoder = scale_bits::decode_using_format_from(&storage, bit_format::())?; + let bits = decoder.collect::, _>>()?; + let bits = Bits::from_iter(bits); + + Ok(DecodedBits { + bits, + _marker: PhantomData, + }) + } +} + +impl codec::Encode for DecodedBits { + fn size_hint(&self) -> usize { + self.bits.size_hint() + } + + fn encoded_size(&self) -> usize { + self.bits.encoded_size() + } + + fn encode(&self) -> Vec { + scale_bits::encode_using_format(self.bits.iter(), bit_format::()) + } +} + +#[doc(hidden)] +pub struct DecodedBitsVisitor(core::marker::PhantomData<(S, O, R)>); + +impl scale_decode::Visitor for DecodedBitsVisitor { + type Value<'scale, 'info> = DecodedBits; + type Error = scale_decode::Error; + type TypeResolver = R; + + fn unchecked_decode_as_type<'scale, 'info>( + self, + input: &mut &'scale [u8], + type_id: R::TypeId, + types: &'info R, + ) -> scale_decode::visitor::DecodeAsTypeResult< + Self, + Result, Self::Error>, + > { + let res = + scale_decode::visitor::decode_with_visitor(input, type_id, types, Bits::into_visitor()) + .map(|bits| DecodedBits { + bits, + _marker: PhantomData, + }); + scale_decode::visitor::DecodeAsTypeResult::Decoded(res) + } +} +impl scale_decode::IntoVisitor for DecodedBits { + type AnyVisitor = DecodedBitsVisitor; + fn into_visitor() -> DecodedBitsVisitor { + DecodedBitsVisitor(PhantomData) + } +} + +impl scale_encode::EncodeAsType for DecodedBits { + fn encode_as_type_to( + &self, + type_id: R::TypeId, + types: &R, + out: &mut Vec, + ) -> Result<(), scale_encode::Error> { + self.bits.encode_as_type_to(type_id, types, out) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use core::fmt::Debug; + + use bitvec::vec::BitVec; + use codec::Decode as _; + + // NOTE: We don't use `bitvec::order` types in our implementation, since we + // don't want to depend on `bitvec`. Rather than reimplementing the unsafe + // trait on our types here for testing purposes, we simply convert and + // delegate to `bitvec`'s own types. 
+ trait ToBitVec { + type Order: bitvec::order::BitOrder; + } + impl ToBitVec for Lsb0 { + type Order = bitvec::order::Lsb0; + } + impl ToBitVec for Msb0 { + type Order = bitvec::order::Msb0; + } + + fn scales_like_bitvec_and_roundtrips< + 'a, + Store: BitStore + bitvec::store::BitStore + PartialEq, + Order: BitOrder + ToBitVec + Debug + PartialEq, + >( + input: impl IntoIterator, + ) where + BitVec::Order>: codec::Encode + codec::Decode, + { + let input: Vec<_> = input.into_iter().copied().collect(); + + let decoded_bits = DecodedBits::::from_iter(input.clone()); + let bitvec = BitVec::::Order>::from_iter(input); + + let decoded_bits_encoded = codec::Encode::encode(&decoded_bits); + let bitvec_encoded = codec::Encode::encode(&bitvec); + assert_eq!(decoded_bits_encoded, bitvec_encoded); + + let decoded_bits_decoded = + DecodedBits::::decode(&mut &decoded_bits_encoded[..]) + .expect("SCALE-encoding DecodedBits to roundtrip"); + let bitvec_decoded = + BitVec::::Order>::decode(&mut &bitvec_encoded[..]) + .expect("SCALE-encoding BitVec to roundtrip"); + assert_eq!(decoded_bits, decoded_bits_decoded); + assert_eq!(bitvec, bitvec_decoded); + } + + #[test] + fn decoded_bitvec_scales_and_roundtrips() { + let test_cases = [ + vec![], + vec![true], + vec![false], + vec![true, false, true], + vec![true, false, true, false, false, false, false, false, true], + [vec![true; 5], vec![false; 5], vec![true; 1], vec![false; 3]].concat(), + [vec![true; 9], vec![false; 9], vec![true; 9], vec![false; 9]].concat(), + ]; + + for test_case in &test_cases { + scales_like_bitvec_and_roundtrips::(test_case); + scales_like_bitvec_and_roundtrips::(test_case); + scales_like_bitvec_and_roundtrips::(test_case); + scales_like_bitvec_and_roundtrips::(test_case); + scales_like_bitvec_and_roundtrips::(test_case); + scales_like_bitvec_and_roundtrips::(test_case); + scales_like_bitvec_and_roundtrips::(test_case); + scales_like_bitvec_and_roundtrips::(test_case); + } + } +} diff --git a/new/src/utils/era.rs b/new/src/utils/era.rs new file mode 100644 index 0000000000..a6e92edbb8 --- /dev/null +++ b/new/src/utils/era.rs @@ -0,0 +1,233 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use codec::{Decode, Encode}; +use scale_decode::{ + IntoVisitor, TypeResolver, Visitor, + ext::scale_type_resolver, + visitor::{TypeIdFor, types::Composite, types::Variant}, +}; +use scale_encode::EncodeAsType; + +// Dev note: This and related bits taken from `sp_runtime::generic::Era` +/// An era to describe the longevity of a transaction. +#[derive( + PartialEq, + Default, + Eq, + Clone, + Copy, + Debug, + serde::Serialize, + serde::Deserialize, + scale_info::TypeInfo, +)] +pub enum Era { + /// The transaction is valid forever. The genesis hash must be present in the signed content. + #[default] + Immortal, + + /// The transaction will expire. Use [`Era::mortal`] to construct this with correct values. + /// + /// When used on `FRAME`-based runtimes, `period` cannot exceed `BlockHashCount` parameter + /// of `system` module. + Mortal { + /// The number of blocks that the tx will be valid for after the checkpoint block + /// hash found in the signer payload. + period: u64, + /// The phase in the period that this transaction's lifetime begins (and, importantly, + /// implies which block hash is included in the signature material). 
If the `period` is + /// greater than 1 << 12, then it will be a factor of the times greater than 1<<12 that + /// `period` is. + phase: u64, + }, +} + +// E.g. with period == 4: +// 0 10 20 30 40 +// 0123456789012345678901234567890123456789012 +// |...| +// authored -/ \- expiry +// phase = 1 +// n = Q(current - phase, period) + phase +impl Era { + /// Create a new era based on a period (which should be a power of two between 4 and 65536 + /// inclusive) and a block number on which it should start (or, for long periods, be shortly + /// after the start). + /// + /// If using `Era` in the context of `FRAME` runtime, make sure that `period` + /// does not exceed `BlockHashCount` parameter passed to `system` module, since that + /// prunes old blocks and renders transactions immediately invalid. + pub fn mortal(period: u64, current: u64) -> Self { + let period = period + .checked_next_power_of_two() + .unwrap_or(1 << 16) + .clamp(4, 1 << 16); + let phase = current % period; + let quantize_factor = (period >> 12).max(1); + let quantized_phase = phase / quantize_factor * quantize_factor; + + Self::Mortal { + period, + phase: quantized_phase, + } + } +} + +// Both copied from `sp_runtime::generic::Era`; this is the wire interface and so +// it's really the most important bit here. +impl codec::Encode for Era { + fn encode_to(&self, output: &mut T) { + match self { + Self::Immortal => output.push_byte(0), + Self::Mortal { period, phase } => { + let quantize_factor = (*period >> 12).max(1); + let encoded = (period.trailing_zeros() - 1).clamp(1, 15) as u16 + | ((phase / quantize_factor) << 4) as u16; + encoded.encode_to(output); + } + } + } +} +impl codec::Decode for Era { + fn decode(input: &mut I) -> Result { + let first = input.read_byte()?; + if first == 0 { + Ok(Self::Immortal) + } else { + let encoded = first as u64 + ((input.read_byte()? as u64) << 8); + let period = 2 << (encoded % (1 << 4)); + let quantize_factor = (period >> 12).max(1); + let phase = (encoded >> 4) * quantize_factor; + if period >= 4 && phase < period { + Ok(Self::Mortal { period, phase }) + } else { + Err("Invalid period and phase".into()) + } + } + } +} + +/// Define manually how to encode an Era given some type information. Here we +/// basically check that the type we're targeting is called "Era" and then codec::Encode. +impl EncodeAsType for Era { + fn encode_as_type_to( + &self, + type_id: R::TypeId, + types: &R, + out: &mut Vec, + ) -> Result<(), scale_encode::Error> { + // Visit the type to check that it is an Era. This is only a rough check. + let visitor = scale_type_resolver::visitor::new((), |_, _| false) + .visit_variant(|_, path, _variants| path.last() == Some("Era")); + + let is_era = types + .resolve_type(type_id.clone(), visitor) + .unwrap_or_default(); + if !is_era { + return Err(scale_encode::Error::custom_string(format!( + "Type {type_id:?} is not a valid Era type; expecting either Immortal or MortalX variant" + ))); + } + + // if the type looks valid then just scale encode our Era. + self.encode_to(out); + Ok(()) + } +} + +/// Define manually how to decode an Era given some type information. Here we check that the +/// variant we're decoding is one of the expected Era variants, and that the field is correct if so, +/// ensuring that this will fail if trying to decode something that isn't an Era. 
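+///
+/// For reference, a worked sketch of the two-byte mortal wire format that this visitor
+/// reconstructs (values chosen purely for illustration):
+///
+/// ```rust,ignore
+/// use codec::{Decode, Encode};
+///
+/// // An immortal era is the single byte 0x00:
+/// assert_eq!(Era::Immortal.encode(), vec![0x00]);
+///
+/// // A mortal era packs the period (as a power-of-two exponent in the low 4 bits) and the
+/// // quantized phase (in the high 12 bits) into a little-endian u16. For period = 64 and
+/// // phase = 42: exponent = trailing_zeros(64) - 1 = 5, and 5 | (42 << 4) = 0x02a5,
+/// // i.e. the bytes [0xa5, 0x02] on the wire.
+/// let era = Era::mortal(64, 42);
+/// assert_eq!(era, Era::Mortal { period: 64, phase: 42 });
+/// assert_eq!(era.encode(), vec![0xa5, 0x02]);
+/// assert_eq!(Era::decode(&mut &[0xa5u8, 0x02][..]).unwrap(), era);
+/// ```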
+pub struct EraVisitor(core::marker::PhantomData); + +impl IntoVisitor for Era { + type AnyVisitor = EraVisitor; + fn into_visitor() -> Self::AnyVisitor { + EraVisitor(core::marker::PhantomData) + } +} + +impl Visitor for EraVisitor { + type Value<'scale, 'resolver> = Era; + type Error = scale_decode::Error; + type TypeResolver = R; + + // Unwrap any newtype wrappers around the era, eg the CheckMortality extension (which actually + // has 2 fields, but scale_info seems to automatically ignore the PhantomData field). This + // allows us to decode directly from CheckMortality into Era. + fn visit_composite<'scale, 'resolver>( + self, + value: &mut Composite<'scale, 'resolver, Self::TypeResolver>, + _type_id: TypeIdFor, + ) -> Result, Self::Error> { + if value.remaining() != 1 { + return Err(scale_decode::Error::custom_string(format!( + "Expected any wrapper around Era to have exactly one field, but got {} fields", + value.remaining() + ))); + } + + value + .decode_item(self) + .expect("1 field expected; checked above.") + } + + fn visit_variant<'scale, 'resolver>( + self, + value: &mut Variant<'scale, 'resolver, Self::TypeResolver>, + _type_id: TypeIdFor, + ) -> Result, Self::Error> { + let variant = value.name(); + + // If the variant is immortal, we know the outcome. + if variant == "Immortal" { + return Ok(Era::Immortal); + } + + // Otherwise, we expect a variant Mortal1..Mortal255 where the number + // here is the first byte, and the second byte is conceptually a field of this variant. + // This weird encoding is because the Era is compressed to just 1 byte if immortal and + // just 2 bytes if mortal. + // + // Note: We _could_ just assume we'll have 2 bytes to work with and decode the era directly, + // but checking the variant names ensures that the thing we think is an Era actually _is_ + // one, based on the type info for it. + let first_byte = variant + .strip_prefix("Mortal") + .and_then(|s| s.parse::().ok()) + .ok_or_else(|| { + scale_decode::Error::custom_string(format!( + "Expected MortalX variant, but got {variant}" + )) + })?; + + // We need 1 field in the MortalN variant containing the second byte. + let mortal_fields = value.fields(); + if mortal_fields.remaining() != 1 { + return Err(scale_decode::Error::custom_string(format!( + "Expected Mortal{} to have one u8 field, but got {} fields", + first_byte, + mortal_fields.remaining() + ))); + } + + let second_byte = mortal_fields + .decode_item(u8::into_visitor()) + .expect("At least one field should exist; checked above.") + .map_err(|e| { + scale_decode::Error::custom_string(format!( + "Expected mortal variant field to be u8, but: {e}" + )) + })?; + + // Now that we have both bytes we can decode them into the era using + // the same logic as the codec::Decode impl does. + Era::decode(&mut &[first_byte, second_byte][..]).map_err(|e| { + scale_decode::Error::custom_string(format!( + "Failed to codec::Decode Era from Mortal bytes: {e}" + )) + }) + } +} diff --git a/new/src/utils/multi_address.rs b/new/src/utils/multi_address.rs new file mode 100644 index 0000000000..9fb8a575ed --- /dev/null +++ b/new/src/utils/multi_address.rs @@ -0,0 +1,43 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! The "default" Substrate/Polkadot Address type. This is used in codegen, as well as signing related bits. +//! This doesn't contain much functionality itself, but is easy to convert to/from an `sp_runtime::MultiAddress` +//! 
for instance, to gain functionality without forcing a dependency on Substrate crates here. + +use codec::{Decode, Encode}; + +/// A multi-format address wrapper for on-chain accounts. This is a simplified version of Substrate's +/// `sp_runtime::MultiAddress`. +#[derive( + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + Debug, + scale_encode::EncodeAsType, + scale_decode::DecodeAsType, + scale_info::TypeInfo, +)] +pub enum MultiAddress { + /// It's an account ID (pubkey). + Id(AccountId), + /// It's an account index. + Index(#[codec(compact)] AccountIndex), + /// It's some arbitrary raw bytes. + Raw(Vec), + /// It's a 32 byte representation. + Address32([u8; 32]), + /// Its a 20 byte representation. + Address20([u8; 20]), +} + +impl From for MultiAddress { + fn from(a: AccountId) -> Self { + Self::Id(a) + } +} diff --git a/new/src/utils/multi_signature.rs b/new/src/utils/multi_signature.rs new file mode 100644 index 0000000000..0f1c623a4f --- /dev/null +++ b/new/src/utils/multi_signature.rs @@ -0,0 +1,21 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! The "default" Substrate/Polkadot Signature type. This is used in codegen, as well as signing related bits. +//! This doesn't contain much functionality itself, but is easy to convert to/from an `sp_runtime::MultiSignature` +//! for instance, to gain functionality without forcing a dependency on Substrate crates here. + +use codec::{Decode, Encode}; + +/// Signature container that can store known signature types. This is a simplified version of +/// `sp_runtime::MultiSignature`. To obtain more functionality, convert this into that type. +#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, Debug, scale_info::TypeInfo)] +pub enum MultiSignature { + /// An Ed25519 signature. + Ed25519([u8; 64]), + /// An Sr25519 signature. + Sr25519([u8; 64]), + /// An ECDSA/SECP256k1 signature (a 512-bit value, plus 8 bits for recovery ID). + Ecdsa([u8; 65]), +} diff --git a/new/src/utils/range_map.rs b/new/src/utils/range_map.rs new file mode 100644 index 0000000000..a98674a288 --- /dev/null +++ b/new/src/utils/range_map.rs @@ -0,0 +1,159 @@ +use std::fmt::Display; + +/// A map that associates ranges of keys with values. +#[derive(Debug, Clone)] +pub struct RangeMap { + // (range_start, range_ended, value). This is + // guaranteed to be sorted and have non-overlapping ranges. + mapping: Vec<(K, K, V)>, +} + +impl RangeMap { + /// Build an empty [`RangeMap`] as a placeholder. + pub fn empty() -> Self { + RangeMap { + mapping: Vec::new(), + } + } + + /// Build a [`RangeMap`]. + pub fn builder() -> RangeMapBuilder { + RangeMapBuilder { + mapping: Vec::new(), + } + } + + /// Return the value whose key is within the range, or None if not found. + pub fn get(&self, key: K) -> Option<&V> { + let idx = self + .mapping + .binary_search_by_key(&key, |&(start, end, _)| { + if key >= start && key < end { + key + } else { + start + } + }) + .ok()?; + + self.mapping.get(idx).map(|(_, _, val)| val) + } +} + +/// A builder for constructing a [`RangeMap`]. Use [``RangeMap::builder()`] to create one. +#[derive(Debug, Clone)] +pub struct RangeMapBuilder { + mapping: Vec<(K, K, V)>, +} + +impl RangeMapBuilder { + /// Try to add a range, mapping block numbers to a spec version. + /// + /// Returns an error if the range is empty or overlaps with an existing range. 
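+    ///
+    /// A rough sketch (the keys and values here are made up for the example): overlapping
+    /// ranges are rejected rather than silently merged, and ranges are half-open:
+    ///
+    /// ```rust,ignore
+    /// let mut builder = RangeMap::builder().add_range(0u64, 100, "v1");
+    /// // 50..150 overlaps the existing 0..100 range, so this fails:
+    /// assert!(builder.try_add_range(50, 150, "v2").is_err());
+    /// // 100..200 does not overlap (the end bound is exclusive), so this succeeds:
+    /// assert!(builder.try_add_range(100, 200, "v2").is_ok());
+    /// ```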
+ pub fn try_add_range( + &mut self, + start: K, + end: K, + val: V, + ) -> Result<&mut Self, RangeMapError> { + let (start, end) = if start < end { + (start, end) + } else { + (end, start) + }; + + if start == end { + return Err(RangeMapError::EmptyRange(start)); + } + + if let Some(&(s, e, _)) = self.mapping.iter().find(|&&(s, e, _)| start < e && end > s) { + return Err(RangeMapError::OverlappingRanges { + proposed: (start, end), + existing: (s, e), + }); + } + + self.mapping.push((start, end, val)); + Ok(self) + } + + /// Add a range of blocks with the given spec version. + /// + /// # Panics + /// + /// This method will panic if the range is empty or overlaps with an existing range. + pub fn add_range(mut self, start: K, end: K, val: V) -> Self { + if let Err(e) = self.try_add_range(start, end, val) { + panic!("{e}") + } + self + } + + /// Finish adding ranges and build the [`RangeMap`]. + pub fn build(mut self) -> RangeMap { + self.mapping.sort_by_key(|&(start, _, _)| start); + RangeMap { + mapping: self.mapping, + } + } +} + +/// An error that can occur when calling [`RangeMapBuilder::try_add_range()`]. +#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)] +pub enum RangeMapError { + /// An error indicating that the proposed block range is empty. + #[error("Block range cannot be empty: start and end values must be different, but got {} for both", .0)] + EmptyRange(K), + /// An error indicating that the proposed block range overlaps with an existing one. + #[error("Overlapping block ranges are not allowed: proposed range is {}..{}, but we already have {}..{}", proposed.0, proposed.1, existing.0, existing.1)] + OverlappingRanges { + /// The range being proposed / added. + proposed: (K, K), + /// The existing range which overlaps. + existing: (K, K) + }, +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_rangemap_get() { + let spec_version = RangeMap::builder() + .add_range(0, 100, 1) + .add_range(100, 200, 2) + .add_range(200, 300, 3) + .build(); + + assert_eq!(spec_version.get(0), Some(&1)); + assert_eq!(spec_version.get(50), Some(&1)); + assert_eq!(spec_version.get(100), Some(&2)); + assert_eq!(spec_version.get(150), Some(&2)); + assert_eq!(spec_version.get(200), Some(&3)); + assert_eq!(spec_version.get(250), Some(&3)); + assert_eq!(spec_version.get(300), None); + } + + #[test] + fn test_rangemap_set() { + let mut spec_version = RangeMap::builder() + .add_range(0, 100, 1) + .add_range(200, 300, 3); + + assert_eq!( + spec_version.try_add_range(99, 130, 2).unwrap_err(), + RangeMapError::OverlappingRanges { + proposed: (99, 130), + existing: (0, 100), + } + ); + assert_eq!( + spec_version.try_add_range(170, 201, 2).unwrap_err(), + RangeMapError::OverlappingRanges { + proposed: (170, 201), + existing: (200, 300), + } + ); + } +} diff --git a/new/src/utils/static_type.rs b/new/src/utils/static_type.rs new file mode 100644 index 0000000000..0969c8f85f --- /dev/null +++ b/new/src/utils/static_type.rs @@ -0,0 +1,79 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use codec::{Decode, Encode}; +use scale_decode::{IntoVisitor, TypeResolver, Visitor, visitor::DecodeAsTypeResult}; +use scale_encode::EncodeAsType; + +/// If the type inside this implements [`Encode`], this will implement [`scale_encode::EncodeAsType`]. +/// If the type inside this implements [`Decode`], this will implement [`scale_decode::DecodeAsType`]. 
+/// +/// In either direction, we ignore any type information and just attempt to encode/decode statically +/// via the [`Encode`] and [`Decode`] implementations. This can be useful as an adapter for types which +/// do not implement [`scale_encode::EncodeAsType`] and [`scale_decode::DecodeAsType`] themselves, but +/// it's best to avoid using it where possible as it will not take into account any type information, +/// and is thus more likely to encode or decode incorrectly. +#[derive(Debug, Encode, Decode, PartialEq, Eq, Clone, PartialOrd, Ord, Hash)] +pub struct Static(pub T); + +impl EncodeAsType for Static { + fn encode_as_type_to( + &self, + _type_id: R::TypeId, + _types: &R, + out: &mut Vec, + ) -> Result<(), scale_encode::Error> { + self.0.encode_to(out); + Ok(()) + } +} + +pub struct StaticDecodeAsTypeVisitor(core::marker::PhantomData<(T, R)>); + +impl Visitor for StaticDecodeAsTypeVisitor { + type Value<'scale, 'info> = Static; + type Error = scale_decode::Error; + type TypeResolver = R; + + fn unchecked_decode_as_type<'scale, 'info>( + self, + input: &mut &'scale [u8], + _type_id: R::TypeId, + _types: &'info R, + ) -> DecodeAsTypeResult, Self::Error>> { + use scale_decode::{Error, visitor::DecodeError}; + let decoded = T::decode(input) + .map(Static) + .map_err(|e| Error::new(DecodeError::CodecError(e).into())); + DecodeAsTypeResult::Decoded(decoded) + } +} + +impl IntoVisitor for Static { + type AnyVisitor = StaticDecodeAsTypeVisitor; + fn into_visitor() -> StaticDecodeAsTypeVisitor { + StaticDecodeAsTypeVisitor(core::marker::PhantomData) + } +} + +// Make it easy to convert types into Static where required. +impl From for Static { + fn from(value: T) -> Self { + Static(value) + } +} + +// Static is just a marker type and should be as transparent as possible: +impl core::ops::Deref for Static { + type Target = T; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl core::ops::DerefMut for Static { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} diff --git a/new/src/utils/unchecked_extrinsic.rs b/new/src/utils/unchecked_extrinsic.rs new file mode 100644 index 0000000000..16ce942a2f --- /dev/null +++ b/new/src/utils/unchecked_extrinsic.rs @@ -0,0 +1,137 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +//! The "default" Substrate/Polkadot UncheckedExtrinsic. +//! This is used in codegen for runtime API calls. +//! +//! The inner bytes represent the encoded extrinsic expected by the +//! runtime APIs. Deriving `EncodeAsType` would lead to the inner +//! bytes to be re-encoded (length prefixed). + +use core::marker::PhantomData; +use codec::{Decode, Encode}; +use scale_decode::{DecodeAsType, IntoVisitor, TypeResolver, Visitor, visitor::DecodeAsTypeResult}; +use super::{Encoded, Static}; + +/// The unchecked extrinsic from substrate. +#[derive(Clone, Debug, Eq, PartialEq, Encode)] +pub struct UncheckedExtrinsic( + Static, + #[codec(skip)] PhantomData<(Address, Call, Signature, Extra)>, +); + +impl UncheckedExtrinsic { + /// Construct a new [`UncheckedExtrinsic`]. + pub fn new(bytes: Vec) -> Self { + Self(Static(Encoded(bytes)), PhantomData) + } + + /// Get the bytes of the encoded extrinsic. + pub fn bytes(&self) -> &[u8] { + self.0.0.0.as_slice() + } +} + +impl Decode + for UncheckedExtrinsic +{ + fn decode(input: &mut I) -> Result { + // The bytes for an UncheckedExtrinsic are first a compact + // encoded length, and then the bytes following. 
This is the + // same encoding as a Vec, so easiest ATM is just to decode + // into that, and then encode the vec bytes to get our extrinsic + // bytes, which we save into an `Encoded` to preserve as-is. + let xt_vec: Vec = Decode::decode(input)?; + Ok(UncheckedExtrinsic::new(xt_vec)) + } +} + +impl scale_encode::EncodeAsType + for UncheckedExtrinsic +{ + fn encode_as_type_to( + &self, + type_id: R::TypeId, + types: &R, + out: &mut Vec, + ) -> Result<(), scale_encode::Error> { + self.0.encode_as_type_to(type_id, types, out) + } +} + +impl From> + for UncheckedExtrinsic +{ + fn from(bytes: Vec) -> Self { + UncheckedExtrinsic::new(bytes) + } +} + +impl From> + for Vec +{ + fn from(bytes: UncheckedExtrinsic) -> Self { + bytes.0.0.0 + } +} + +pub struct UncheckedExtrinsicDecodeAsTypeVisitor( + PhantomData<(Address, Call, Signature, Extra, R)>, +); + +impl Visitor + for UncheckedExtrinsicDecodeAsTypeVisitor +{ + type Value<'scale, 'info> = UncheckedExtrinsic; + type Error = scale_decode::Error; + type TypeResolver = R; + + fn unchecked_decode_as_type<'scale, 'info>( + self, + input: &mut &'scale [u8], + type_id: R::TypeId, + types: &'info R, + ) -> DecodeAsTypeResult, Self::Error>> { + DecodeAsTypeResult::Decoded(Self::Value::decode_as_type(input, type_id, types)) + } +} + +impl IntoVisitor + for UncheckedExtrinsic +{ + type AnyVisitor = + UncheckedExtrinsicDecodeAsTypeVisitor; + + fn into_visitor() + -> UncheckedExtrinsicDecodeAsTypeVisitor { + UncheckedExtrinsicDecodeAsTypeVisitor(PhantomData) + } +} + +#[cfg(test)] +pub mod tests { + use super::*; + + #[test] + fn unchecked_extrinsic_encoding() { + // A tx is basically some bytes with a compact length prefix; ie an encoded vec: + let tx_bytes = vec![1u8, 2, 3].encode(); + + let unchecked_extrinsic = UncheckedExtrinsic::<(), (), (), ()>::new(tx_bytes.clone()); + let encoded_tx_bytes = unchecked_extrinsic.encode(); + + // The encoded representation must not alter the provided bytes. + assert_eq!(tx_bytes, encoded_tx_bytes); + + // However, for decoding we expect to be able to read the extrinsic from the wire + // which would be length prefixed. + let decoded_tx = UncheckedExtrinsic::<(), (), (), ()>::decode(&mut &tx_bytes[..]).unwrap(); + let decoded_tx_bytes = decoded_tx.bytes(); + let encoded_tx_bytes = decoded_tx.encode(); + + assert_eq!(decoded_tx_bytes, encoded_tx_bytes); + // Ensure we can decode the tx and fetch only the tx bytes. + assert_eq!(vec![1, 2, 3], encoded_tx_bytes); + } +} diff --git a/new/src/utils/wrapper_opaque.rs b/new/src/utils/wrapper_opaque.rs new file mode 100644 index 0000000000..3e885149ac --- /dev/null +++ b/new/src/utils/wrapper_opaque.rs @@ -0,0 +1,237 @@ +// Copyright 2019-2024 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +use super::PhantomDataSendSync; +use codec::{Compact, Decode, DecodeAll, Encode}; +use derive_where::derive_where; +use scale_decode::{IntoVisitor, TypeResolver, Visitor, ext::scale_type_resolver::visitor}; +use scale_encode::EncodeAsType; + +/// A wrapper for any type `T` which implement encode/decode in a way compatible with `Vec`. +/// [`WrapperKeepOpaque`] stores the type only in its opaque format, aka as a `Vec`. To +/// access the real type `T` [`Self::try_decode`] needs to be used. +// Dev notes: +// +// - This is adapted from [here](https://github.com/paritytech/substrate/blob/master/frame/support/src/traits/misc.rs). 
+// - The encoded bytes will be a compact encoded length followed by that number of bytes. +// - However, the TypeInfo describes the type as a composite with first a compact encoded length and next the type itself. +// [`Encode`] and [`Decode`] impls will "just work" to take this into a `Vec`, but we need a custom [`EncodeAsType`] +// and [`Visitor`] implementation to encode and decode based on TypeInfo. +#[derive(Encode, Decode)] +#[derive_where(Debug, Clone, PartialEq, Eq, Default, Hash)] +pub struct WrapperKeepOpaque { + data: Vec, + _phantom: PhantomDataSendSync, +} + +impl WrapperKeepOpaque { + /// Try to decode the wrapped type from the inner `data`. + /// + /// Returns `None` if the decoding failed. + pub fn try_decode(&self) -> Option + where + T: Decode, + { + T::decode_all(&mut &self.data[..]).ok() + } + + /// Returns the length of the encoded `T`. + pub fn encoded_len(&self) -> usize { + self.data.len() + } + + /// Returns the encoded data. + pub fn encoded(&self) -> &[u8] { + &self.data + } + + /// Create from the given encoded `data`. + pub fn from_encoded(data: Vec) -> Self { + Self { + data, + _phantom: PhantomDataSendSync::new(), + } + } + + /// Create from some raw value by encoding it. + pub fn from_value(value: T) -> Self + where + T: Encode, + { + Self { + data: value.encode(), + _phantom: PhantomDataSendSync::new(), + } + } +} + +impl EncodeAsType for WrapperKeepOpaque { + fn encode_as_type_to( + &self, + type_id: R::TypeId, + types: &R, + out: &mut Vec, + ) -> Result<(), scale_encode::Error> { + use scale_encode::error::{Error, ErrorKind, Kind}; + + let ctx = (type_id.clone(), out); + let visitor = visitor::new(ctx, |(type_id, _out), _| { + // Check that the target shape lines up: any other shape but composite is wrong. + Err(Error::new(ErrorKind::WrongShape { + actual: Kind::Struct, + expected_id: format!("{type_id:?}"), + })) + }) + .visit_composite(|(_type_id, out), _path, _fields| { + self.data.encode_to(out); + Ok(()) + }); + + types + .resolve_type(type_id.clone(), visitor) + .map_err(|_| Error::new(ErrorKind::TypeNotFound(format!("{type_id:?}"))))? + } +} + +pub struct WrapperKeepOpaqueVisitor(core::marker::PhantomData<(T, R)>); +impl Visitor for WrapperKeepOpaqueVisitor { + type Value<'scale, 'info> = WrapperKeepOpaque; + type Error = scale_decode::Error; + type TypeResolver = R; + + fn visit_composite<'scale, 'info>( + self, + value: &mut scale_decode::visitor::types::Composite<'scale, 'info, R>, + _type_id: R::TypeId, + ) -> Result, Self::Error> { + use scale_decode::error::{Error, ErrorKind}; + use scale_decode::visitor::DecodeError; + + if value.name() != Some("WrapperKeepOpaque") { + return Err(Error::new(ErrorKind::VisitorDecodeError( + DecodeError::TypeResolvingError(format!( + "Expected a type named 'WrapperKeepOpaque', got: {:?}", + value.name() + )), + ))); + } + + if value.remaining() != 2 { + return Err(Error::new(ErrorKind::WrongLength { + actual_len: value.remaining(), + expected_len: 2, + })); + } + + // The field to decode is a compact len followed by bytes. Decode the length, then grab the bytes. + let Compact(len) = value + .decode_item(Compact::::into_visitor()) + .expect("length checked")?; + let field = value.next().expect("length checked")?; + + // Sanity check that the compact length we decoded lines up with the number of bytes encoded in the next field. 
+ if field.bytes().len() != len as usize { + return Err(Error::custom_str( + "WrapperTypeKeepOpaque compact encoded length doesn't line up with encoded byte len", + )); + } + + Ok(WrapperKeepOpaque { + data: field.bytes().to_vec(), + _phantom: PhantomDataSendSync::new(), + }) + } +} + +impl IntoVisitor for WrapperKeepOpaque { + type AnyVisitor = WrapperKeepOpaqueVisitor; + fn into_visitor() -> WrapperKeepOpaqueVisitor { + WrapperKeepOpaqueVisitor(core::marker::PhantomData) + } +} + +#[cfg(test)] +mod test { + use scale_decode::DecodeAsType; + + use super::*; + + // Copied from https://github.com/paritytech/substrate/blob/master/frame/support/src/traits/misc.rs + // and used for tests to check that we can work with the expected TypeInfo without needing to import + // the frame_support crate, which has quite a lot of dependencies. + impl scale_info::TypeInfo for WrapperKeepOpaque { + type Identity = Self; + fn type_info() -> scale_info::Type { + use scale_info::{Path, Type, TypeParameter, build::Fields, meta_type}; + + Type::builder() + .path(Path::new("WrapperKeepOpaque", module_path!())) + .type_params(vec![TypeParameter::new("T", Some(meta_type::()))]) + .composite( + Fields::unnamed() + .field(|f| f.compact::()) + .field(|f| f.ty::().type_name("T")), + ) + } + } + + /// Given a type definition, return type ID and registry representing it. + fn make_type() -> (u32, scale_info::PortableRegistry) { + let m = scale_info::MetaType::new::(); + let mut types = scale_info::Registry::new(); + let id = types.register_type(&m); + let portable_registry: scale_info::PortableRegistry = types.into(); + (id.id, portable_registry) + } + + fn roundtrips_like_scale_codec(t: T) + where + T: EncodeAsType + + DecodeAsType + + Encode + + Decode + + PartialEq + + core::fmt::Debug + + scale_info::TypeInfo + + 'static, + { + let (type_id, types) = make_type::(); + + let scale_codec_encoded = t.encode(); + let encode_as_type_encoded = t.encode_as_type(type_id, &types).unwrap(); + + assert_eq!( + scale_codec_encoded, encode_as_type_encoded, + "encoded bytes should match" + ); + + let decode_as_type_bytes = &mut &*scale_codec_encoded; + let decoded_as_type = T::decode_as_type(decode_as_type_bytes, type_id, &types) + .expect("decode-as-type decodes"); + + let decode_scale_codec_bytes = &mut &*scale_codec_encoded; + let decoded_scale_codec = T::decode(decode_scale_codec_bytes).expect("scale-codec decodes"); + + assert!( + decode_as_type_bytes.is_empty(), + "no bytes should remain in decode-as-type impl" + ); + assert!( + decode_scale_codec_bytes.is_empty(), + "no bytes should remain in codec-decode impl" + ); + + assert_eq!( + decoded_as_type, decoded_scale_codec, + "decoded values should match" + ); + } + + #[test] + fn wrapper_keep_opaque_roundtrips_ok() { + roundtrips_like_scale_codec(WrapperKeepOpaque::from_value(123u64)); + roundtrips_like_scale_codec(WrapperKeepOpaque::from_value(true)); + roundtrips_like_scale_codec(WrapperKeepOpaque::from_value(vec![1u8, 2, 3, 4])); + } +} diff --git a/new/src/utils/yesnomaybe.rs b/new/src/utils/yesnomaybe.rs new file mode 100644 index 0000000000..18a878d942 --- /dev/null +++ b/new/src/utils/yesnomaybe.rs @@ -0,0 +1,82 @@ +// Copyright 2019-2025 Parity Technologies (UK) Ltd. +// This file is dual-licensed as Apache-2.0 or GPL-3.0. +// see LICENSE for license details. + +/// A unit marker enum. +pub enum Yes {} +/// A unit marker enum. +pub enum Maybe {} +/// A unit marker enum. 
+pub enum No {} + +/// This is implemented for [`Yes`] and [`No`] and +/// allows us to check at runtime which of these types is present. +pub trait YesNo { + /// [`Yes`] + fn is_yes() -> bool { + false + } + /// [`No`] + fn is_no() -> bool { + false + } +} + +impl YesNo for Yes { + fn is_yes() -> bool { + true + } +} +impl YesNo for No { + fn is_no() -> bool { + true + } +} + +/// This is implemented for [`Yes`] and [`Maybe`] and +/// allows us to check at runtime which of these types is present. +pub trait YesMaybe { + /// [`Yes`] + fn is_yes() -> bool { + false + } + /// [`Maybe`] + fn is_maybe() -> bool { + false + } +} + +impl YesMaybe for Yes { + fn is_yes() -> bool { + true + } +} +impl YesMaybe for Maybe { + fn is_maybe() -> bool { + true + } +} + +/// This is implemented for [`No`] and [`Maybe`] and +/// allows us to check at runtime which of these types is present. +pub trait NoMaybe { + /// [`No`] + fn is_no() -> bool { + false + } + /// [`Maybe`] + fn is_maybe() -> bool { + false + } +} + +impl NoMaybe for No { + fn is_no() -> bool { + true + } +} +impl NoMaybe for Maybe { + fn is_maybe() -> bool { + true + } +} diff --git a/rpcs/Cargo.toml b/rpcs/Cargo.toml index e0d86be376..e18e1db71e 100644 --- a/rpcs/Cargo.toml +++ b/rpcs/Cargo.toml @@ -97,7 +97,6 @@ jsonrpsee = { workspace = true, features = ["server"] } [package.metadata.docs.rs] default-features = true -rustdoc-args = ["--cfg", "docsrs"] [lints] workspace = true diff --git a/signer/Cargo.toml b/signer/Cargo.toml index 876d5353a2..867c785eab 100644 --- a/signer/Cargo.toml +++ b/signer/Cargo.toml @@ -95,7 +95,6 @@ ignored = ["getrandom"] [package.metadata.docs.rs] default-features = true -rustdoc-args = ["--cfg", "docsrs"] [package.metadata.playground] default-features = true diff --git a/subxt/Cargo.toml b/subxt/Cargo.toml index 08d7b55eb5..72e44a46d8 100644 --- a/subxt/Cargo.toml +++ b/subxt/Cargo.toml @@ -157,7 +157,6 @@ required-features = ["reconnecting-rpc-client"] [package.metadata.docs.rs] features = ["default", "unstable-light-client"] -rustdoc-args = ["--cfg", "docsrs"] [package.metadata.playground] features = ["default", "unstable-light-client"] diff --git a/utils/fetch-metadata/Cargo.toml b/utils/fetch-metadata/Cargo.toml index 1fcf761343..5b61393d4e 100644 --- a/utils/fetch-metadata/Cargo.toml +++ b/utils/fetch-metadata/Cargo.toml @@ -29,7 +29,6 @@ frame-metadata = { workspace = true, optional = true, features = ["std"] } [package.metadata.docs.rs] features = ["url"] -rustdoc-args = ["--cfg", "docsrs"] [package.metadata.playground] default-features = true diff --git a/utils/strip-metadata/Cargo.toml b/utils/strip-metadata/Cargo.toml index 391f95eb7f..9fa5195f88 100644 --- a/utils/strip-metadata/Cargo.toml +++ b/utils/strip-metadata/Cargo.toml @@ -21,7 +21,6 @@ either = { workspace = true } [package.metadata.docs.rs] features = ["url"] -rustdoc-args = ["--cfg", "docsrs"] [package.metadata.playground] default-features = true